/*
 * Shared glue code for 128bit block ciphers
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

#include <linux/module.h>
#include <crypto/b128ops.h>
#include <crypto/internal/skcipher.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/crypto/glue_helper.h>

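/*
 * All helpers below share one structure: gctx->funcs[] lists routines that
 * each process gctx->funcs[i].num_blocks blocks per call, ordered from
 * widest to narrowest. Every walk chunk is fed to the widest routine that
 * still fits, falling through to the one-block routine for the tail, so a
 * cipher's N-way SIMD path and its scalar path are driven by the same loop.
 */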
static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
				   struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes, i, func_bytes;
	bool fpu_enabled = false;
	int err;

	err = blkcipher_walk_virt(desc, walk);

	while ((nbytes = walk->nbytes)) {
		u8 *wsrc = walk->src.virt.addr;
		u8 *wdst = walk->dst.virt.addr;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);

		for (i = 0; i < gctx->num_funcs; i++) {
			func_bytes = bsize * gctx->funcs[i].num_blocks;

			/* Process multi-block batch */
			if (nbytes >= func_bytes) {
				do {
					gctx->funcs[i].fn_u.ecb(ctx, wdst,
								wsrc);

					wsrc += func_bytes;
					wdst += func_bytes;
					nbytes -= func_bytes;
				} while (nbytes >= func_bytes);

				if (nbytes < bsize)
					goto done;
			}
		}

done:
		err = blkcipher_walk_done(desc, walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}

int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return __glue_ecb_crypt_128bit(gctx, desc, &walk);
}
EXPORT_SYMBOL_GPL(glue_ecb_crypt_128bit);
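
/*
 * Example usage (a sketch modeled on the serpent-avx glue code; the
 * serpent_* names stand in for whatever per-cipher routines a driver
 * actually provides):
 *
 *	static const struct common_glue_ctx serpent_enc = {
 *		.num_funcs = 2,
 *		.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
 *
 *		.funcs = { {
 *			.num_blocks = SERPENT_PARALLEL_BLOCKS,
 *			.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_8way_avx) }
 *		}, {
 *			.num_blocks = 1,
 *			.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
 *		} }
 *	};
 *
 *	static int ecb_encrypt(struct blkcipher_desc *desc,
 *			       struct scatterlist *dst,
 *			       struct scatterlist *src, unsigned int nbytes)
 *	{
 *		return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src,
 *					     nbytes);
 *	}
 *
 * Entries in .funcs[] must be ordered from largest to smallest num_blocks,
 * ending with a num_blocks == 1 fallback, since the batch loops try them
 * in array order.
 */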

static unsigned int __glue_cbc_encrypt_128bit(const common_glue_func_t fn,
					      struct blkcipher_desc *desc,
					      struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 *iv = (u128 *)walk->iv;

	do {
		u128_xor(dst, src, iv);
		fn(ctx, (u8 *)dst, (u8 *)dst);
		iv = dst;

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

	*(u128 *)walk->iv = *iv;
	return nbytes;
}

int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
			    struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		nbytes = __glue_cbc_encrypt_128bit(fn, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_encrypt_128bit);
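
/*
 * CBC encryption above is inherently serial (each block's input depends on
 * the previous ciphertext block), so it takes a single one-block function
 * and never touches the FPU. Decryption has no such dependency: the helper
 * below walks each chunk backwards so that a multi-block routine can
 * decrypt num_blocks ciphertext blocks in parallel before the XOR chain
 * with the preceding ciphertext blocks is applied.
 */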

static unsigned int
__glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc,
			  struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 last_iv;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	/* Start of the last block. */
	src += nbytes / bsize - 1;
	dst += nbytes / bsize - 1;

	last_iv = *src;

	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		/* Process multi-block batch */
		if (nbytes >= func_bytes) {
			do {
				nbytes -= func_bytes - bsize;
				src -= num_blocks - 1;
				dst -= num_blocks - 1;

				gctx->funcs[i].fn_u.cbc(ctx, dst, src);

				nbytes -= bsize;
				if (nbytes < bsize)
					goto done;

				u128_xor(dst, dst, src - 1);
				src -= 1;
				dst -= 1;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	u128_xor(dst, dst, (u128 *)walk->iv);
	*(u128 *)walk->iv = last_iv;

	return nbytes;
}

int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
			    struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src, unsigned int nbytes)
{
	const unsigned int bsize = 128 / 8;
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);
		nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
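
/*
 * CTR mode: the counter block is kept as le128 internally and converted
 * from/to the big-endian on-wire IV around each walk chunk. The _final
 * helper below handles a trailing partial block by round-tripping it
 * through a full-block stack buffer: copy in nbytes of source, run the
 * one-block CTR function over the buffer, copy nbytes back out.
 */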

static void glue_ctr_crypt_final_128bit(const common_glue_ctr_func_t fn_ctr,
					struct blkcipher_desc *desc,
					struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	u8 *src = (u8 *)walk->src.virt.addr;
	u8 *dst = (u8 *)walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;
	le128 ctrblk;
	u128 tmp;

	be128_to_le128(&ctrblk, (be128 *)walk->iv);

	memcpy(&tmp, src, nbytes);
	fn_ctr(ctx, &tmp, &tmp, &ctrblk);
	memcpy(dst, &tmp, nbytes);

	le128_to_be128((be128 *)walk->iv, &ctrblk);
}

static unsigned int __glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
					    struct blkcipher_desc *desc,
					    struct blkcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	le128 ctrblk;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	be128_to_le128(&ctrblk, (be128 *)walk->iv);

	/* Process multi-block batch */
	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		if (nbytes >= func_bytes) {
			do {
				gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);

				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	le128_to_be128((be128 *)walk->iv, &ctrblk);
	return nbytes;
}

int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes)
{
	const unsigned int bsize = 128 / 8;
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, bsize);

	while ((nbytes = walk.nbytes) >= bsize) {
		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);
		nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);

	if (walk.nbytes) {
		glue_ctr_crypt_final_128bit(
			gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
EXPORT_SYMBOL_GPL(glue_ctr_crypt_128bit);
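
/*
 * XTS: the tweak T for the first block is derived by encrypting the IV
 * with the tweak key (tweak_fn/tweak_ctx below); the per-cipher .xts
 * routines then consume blocks and advance the tweak themselves (see
 * glue_xts_crypt_128bit_one() at the end of this file for the
 * single-block step). Two front ends follow: glue_xts_crypt_128bit() for
 * the blkcipher interface and glue_xts_req_128bit() for skcipher requests.
 */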

static unsigned int __glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
					    void *ctx,
					    struct blkcipher_desc *desc,
					    struct blkcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	/* Process multi-block batch */
	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		if (nbytes >= func_bytes) {
			do {
				gctx->funcs[i].fn_u.xts(ctx, dst, src,
							(le128 *)walk->iv);

				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	return nbytes;
}

static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx,
					  void *ctx,
					  struct skcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = walk->src.virt.addr;
	u128 *dst = walk->dst.virt.addr;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	/* Process multi-block batch */
	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		if (nbytes >= func_bytes) {
			do {
				gctx->funcs[i].fn_u.xts(ctx, dst, src,
							walk->iv);

				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	return nbytes;
}

/* for cipher implementations that provide an accelerated XTS tweak (IV) generator */
int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes,
			  void (*tweak_fn)(void *ctx, u8 *dst, const u8 *src),
			  void *tweak_ctx, void *crypt_ctx)
{
	const unsigned int bsize = 128 / 8;
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);

	err = blkcipher_walk_virt(desc, &walk);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	/* set minimum length to bsize, for tweak_fn */
	fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
				     desc, fpu_enabled,
				     nbytes < bsize ? bsize : nbytes);

	/* calculate first value of T */
	tweak_fn(tweak_ctx, walk.iv, walk.iv);

	while (nbytes) {
		nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);

		err = blkcipher_walk_done(desc, &walk, nbytes);
		nbytes = walk.nbytes;
	}

	glue_fpu_end(fpu_enabled);

	return err;
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);

int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
			struct skcipher_request *req,
			common_glue_func_t tweak_fn, void *tweak_ctx,
			void *crypt_ctx)
{
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	/* set minimum length to bsize, for tweak_fn */
	fpu_enabled = glue_skwalk_fpu_begin(bsize, gctx->fpu_blocks_limit,
					    &walk, fpu_enabled,
					    nbytes < bsize ? bsize : nbytes);

	/* calculate first value of T */
	tweak_fn(tweak_ctx, walk.iv, walk.iv);

	while (nbytes) {
		nbytes = __glue_xts_req_128bit(gctx, crypt_ctx, &walk);

		err = skcipher_walk_done(&walk, nbytes);
		nbytes = walk.nbytes;
	}

	glue_fpu_end(fpu_enabled);

	return err;
}
EXPORT_SYMBOL_GPL(glue_xts_req_128bit);
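
/*
 * Single-block XTS step (the XEX construction): XOR with the tweak T,
 * apply the cipher's one-block encrypt or decrypt via @fn, XOR with T
 * again, and advance *iv to the next tweak by multiplying by x in
 * GF(2^128).
 */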

void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
			       common_glue_func_t fn)
{
	le128 ivblk = *iv;

	/* generate next IV */
	le128_gf128mul_x_ble(iv, &ivblk);

	/* CC <- T xor C */
	u128_xor(dst, src, (u128 *)&ivblk);

	/* PP <- D(Key2,CC) */
	fn(ctx, (u8 *)dst, (u8 *)dst);

	/* P <- T xor PP */
	u128_xor(dst, dst, (u128 *)&ivblk);
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one);

MODULE_LICENSE("GPL");