/*
 *  linux/net/sunrpc/gss_krb5_crypto.c
 *
 *  Copyright (c) 2000-2008 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson   <andros@umich.edu>
 *  Bruce Fields   <bfields@umich.edu>
 */

/*
 * Copyright (C) 1998 by the FundsXpress, INC.
 *
 * All rights reserved.
 *
 * Export of this software from the United States of America may require
 * a specific license from the United States Government.  It is the
 * responsibility of any person or organization contemplating export to
 * obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of FundsXpress. not be used in advertising or publicity pertaining
 * to distribution of the software without specific, written prior
 * permission.  FundsXpress makes no representations about the suitability of
 * this software for any purpose.  It is provided "as is" without express
 * or implied warranty.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_AUTH
#endif

u32
krb5_encrypt(
	struct crypto_sync_skcipher *tfm,
	void *iv,
	void *in,
	void *out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	if (length % crypto_sync_skcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_sync_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC:       gss_k5encrypt: tfm iv size too large %d\n",
			crypto_sync_skcipher_ivsize(tfm));
		goto out;
	}

	if (iv)
		memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, length, local_iv);

	ret = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);
out:
	dprintk("RPC:       krb5_encrypt returns %d\n", ret);
	return ret;
}

u32
krb5_decrypt(
	struct crypto_sync_skcipher *tfm,
	void *iv,
	void *in,
	void *out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	if (length % crypto_sync_skcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_sync_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC:       gss_k5decrypt: tfm iv size too large %d\n",
			crypto_sync_skcipher_ivsize(tfm));
		goto out;
	}
	if (iv)
		memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, length, local_iv);

	ret = crypto_skcipher_decrypt(req);
	skcipher_request_zero(req);
out:
	dprintk("RPC:       gss_k5decrypt returns %d\n", ret);
	return ret;
}
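
/*
 * Editorial usage sketch -- not part of the upstream file.  It shows a
 * minimal round trip through krb5_encrypt()/krb5_decrypt() with a
 * caller-allocated sync skcipher.  The "cbc(aes)" algorithm name and
 * the all-zero key are illustrative assumptions, not values taken from
 * a real Kerberos context.
 */
static int __maybe_unused krb5_crypt_roundtrip_sketch(void)
{
	static const u8 key[16];		/* hypothetical 128-bit key */
	u8 clear[16] = "fifteen bytes..";	/* exactly one AES block */
	u8 cipher[16];
	struct crypto_sync_skcipher *tfm;
	int err;

	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_sync_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free;

	/* A NULL iv asks the helpers above to use an all-zero IV. */
	err = krb5_encrypt(tfm, NULL, clear, cipher, sizeof(clear));
	if (!err)
		err = krb5_decrypt(tfm, NULL, cipher, clear, sizeof(cipher));
out_free:
	crypto_free_sync_skcipher(tfm);
	return err;
}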

static int
checksummer(struct scatterlist *sg, void *data)
{
	struct ahash_request *req = data;

	ahash_request_set_crypt(req, sg, NULL, sg->length);

	return crypto_ahash_update(req);
}

/*
 * checksum the plaintext data and hdrlen bytes of the token header.
 * The checksum is performed over the first 8 bytes of the
 * gss token header and then over the data body.
 */
u32
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
	      struct xdr_buf *body, int body_offset, u8 *cksumkey,
	      unsigned int usage, struct xdr_netobj *cksumout)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg[1];
	int err = -1;
	u8 *checksumdata;
	unsigned int checksumlen;

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_KERNEL);
	if (checksumdata == NULL)
		return GSS_S_FAILURE;

	tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto out_free_cksum;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ahash;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	checksumlen = crypto_ahash_digestsize(tfm);

	if (cksumkey != NULL) {
		err = crypto_ahash_setkey(tfm, cksumkey,
					  kctx->gk5e->keylength);
		if (err)
			goto out;
	}

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	sg_init_one(sg, header, hdrlen);
	ahash_request_set_crypt(req, sg, NULL, hdrlen);
	err = crypto_ahash_update(req);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out;
	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_RSA_MD5:
		err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
					  checksumdata, checksumlen);
		if (err)
			goto out;
		memcpy(cksumout->data,
		       checksumdata + checksumlen - kctx->gk5e->cksumlength,
		       kctx->gk5e->cksumlength);
		break;
	case CKSUMTYPE_HMAC_SHA1_DES3:
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
	cksumout->len = kctx->gk5e->cksumlength;
out:
	ahash_request_free(req);
out_free_ahash:
	crypto_free_ahash(tfm);
out_free_cksum:
	kfree(checksumdata);
	return err ? GSS_S_FAILURE : 0;
}
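
/*
 * Editorial sketch -- not part of the upstream file.  It illustrates
 * the calling convention: the first 8 octets of a v1 token header plus
 * an xdr_buf body.  The zeroed header, the use of kctx->cksum as the
 * key, and KG_USAGE_SIGN are assumptions for illustration only; the
 * real callers live in gss_krb5_seal.c and gss_krb5_unseal.c.
 */
static u32 __maybe_unused example_mic_checksum(struct krb5_ctx *kctx,
					       struct xdr_buf *body,
					       struct xdr_netobj *mic)
{
	u8 header[8] = { 0 };	/* TOK_ID, SGN_ALG, SEAL_ALG, filler */

	return make_checksum(kctx, (char *)header, sizeof(header),
			     body, 0, kctx->cksum, KG_USAGE_SIGN, mic);
}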

/*
 * checksum the plaintext data and hdrlen bytes of the token header.
 * Per RFC 4121, sec. 4.2.4, the checksum is performed over the data
 * body and then over the first 16 octets of the MIC token.
 * Inclusion of the header data in the calculation of the
 * checksum is optional.
 */
u32
make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
		 struct xdr_buf *body, int body_offset, u8 *cksumkey,
		 unsigned int usage, struct xdr_netobj *cksumout)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg[1];
	int err = -1;
	u8 *checksumdata;

	if (kctx->gk5e->keyed_cksum == 0) {
		dprintk("%s: expected keyed hash for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}
	if (cksumkey == NULL) {
		dprintk("%s: no key supplied for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_KERNEL);
	if (!checksumdata)
		return GSS_S_FAILURE;

	tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto out_free_cksum;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ahash;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	err = crypto_ahash_setkey(tfm, cksumkey, kctx->gk5e->keylength);
	if (err)
		goto out;

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out;
	if (header != NULL) {
		sg_init_one(sg, header, hdrlen);
		ahash_request_set_crypt(req, sg, NULL, hdrlen);
		err = crypto_ahash_update(req);
		if (err)
			goto out;
	}
	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out;

	cksumout->len = kctx->gk5e->cksumlength;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_HMAC_SHA1_96_AES128:
	case CKSUMTYPE_HMAC_SHA1_96_AES256:
		/* note that this truncates the hash */
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
out:
	ahash_request_free(req);
out_free_ahash:
	crypto_free_ahash(tfm);
out_free_cksum:
	kfree(checksumdata);
	return err ? GSS_S_FAILURE : 0;
}
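
/*
 * Editorial note -- not part of the upstream file.  The real calling
 * convention for make_checksum_v2() appears in gss_krb5_aes_encrypt()
 * and gss_krb5_aes_decrypt() below: header == NULL (taking the
 * "optional" branch above), body_offset pointing just past the token
 * header (or zero on a subsegment that already skips it), and
 * cksumout->data aimed at slack space big enough for a full SHA-1
 * digest, of which only cksumlength == 12 octets go on the wire.
 */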

struct encryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct skcipher_request *req;
	int pos;
	struct xdr_buf *outbuf;
	struct page **pages;
	struct scatterlist infrags[4];
	struct scatterlist outfrags[4];
	int fragno;
	int fraglen;
};

static int
encryptor(struct scatterlist *sg, void *data)
{
	struct encryptor_desc *desc = data;
	struct xdr_buf *outbuf = desc->outbuf;
	struct crypto_sync_skcipher *tfm =
		crypto_sync_skcipher_reqtfm(desc->req);
	struct page *in_page;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;
	int page_pos;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);

	page_pos = desc->pos - outbuf->head[0].iov_len;
	if (page_pos >= 0 && page_pos < outbuf->page_len) {
		/* pages are not in place: */
		int i = (page_pos + outbuf->page_base) >> PAGE_SHIFT;
		in_page = desc->pages[i];
	} else {
		in_page = sg_page(sg);
	}
	sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
		    sg->offset);
	sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;
	desc->pos += sg->length;

	fraglen = thislen & (crypto_sync_skcipher_blocksize(tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->infrags[desc->fragno - 1]);
	sg_mark_end(&desc->outfrags[desc->fragno - 1]);

	skcipher_request_set_crypt(desc->req, desc->infrags, desc->outfrags,
				   thislen, desc->iv);

	ret = crypto_skcipher_encrypt(desc->req);
	if (ret)
		return ret;

	sg_init_table(desc->infrags, 4);
	sg_init_table(desc->outfrags, 4);

	if (fraglen) {
		sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
				sg->offset + sg->length - fraglen);
		desc->infrags[0] = desc->outfrags[0];
		sg_assign_page(&desc->infrags[0], in_page);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}
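
/*
 * Editorial illustration -- not part of the upstream file.  The
 * four-fragment worst case that the BUG_ON above allows for: between
 * two cipher calls, a blocksize-aligned span of an xdr_buf can
 * contribute at most
 *
 *	[ rest of head[0] | end of page N | start of page N+1 | tail ]
 *
 * i.e. one fragment from the head iovec, the trailing part of one
 * page, the leading part of the next, and the tail iovec.
 */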

int
gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
		    int offset, struct page **pages)
{
	int ret;
	struct encryptor_desc desc;
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.req = req;
	desc.pos = offset;
	desc.outbuf = buf;
	desc.pages = pages;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.infrags, 4);
	sg_init_table(desc.outfrags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
	skcipher_request_zero(req);
	return ret;
}
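
/*
 * Editorial usage sketch -- not part of the upstream file.  In-place
 * encryption of an entire blocksize-aligned xdr_buf, under the
 * assumption that buf->pages already holds the scratch pages destined
 * for the wire, so the plaintext source pages and the output pages
 * are the same.
 */
static int __maybe_unused encrypt_whole_xdr_buf_sketch(
		struct crypto_sync_skcipher *tfm, struct xdr_buf *buf)
{
	return gss_encrypt_xdr_buf(tfm, buf, 0, buf->pages);
}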

struct decryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct skcipher_request *req;
	struct scatterlist frags[4];
	int fragno;
	int fraglen;
};

static int
decryptor(struct scatterlist *sg, void *data)
{
	struct decryptor_desc *desc = data;
	int thislen = desc->fraglen + sg->length;
	struct crypto_sync_skcipher *tfm =
		crypto_sync_skcipher_reqtfm(desc->req);
	int fraglen, ret;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);
	sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;

	fraglen = thislen & (crypto_sync_skcipher_blocksize(tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->frags[desc->fragno - 1]);

	skcipher_request_set_crypt(desc->req, desc->frags, desc->frags,
				   thislen, desc->iv);

	ret = crypto_skcipher_decrypt(desc->req);
	if (ret)
		return ret;

	sg_init_table(desc->frags, 4);

	if (fraglen) {
		sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
				sg->offset + sg->length - fraglen);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

int
gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
		    int offset)
{
	int ret;
	struct decryptor_desc desc;
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	/* XXXJBF: */
	BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.req = req;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.frags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
	skcipher_request_zero(req);
	return ret;
}

/*
 * This function makes the assumption that it was ultimately called
 * from gss_wrap().
 *
 * The client auth_gss code moves any existing tail data into a
 * separate page before calling gss_wrap.
 * The server svcauth_gss code ensures that both the head and the
 * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
 *
 * Even with that guarantee, this function may be called more than
 * once in the processing of gss_wrap().  The best we can do is
 * verify at compile-time (see the BUILD_BUG_ON below) that the
 * largest expected shift, GSS_KRB5_MAX_SLACK_NEEDED, will fit within
 * RPC_MAX_AUTH_SIZE.  At run-time we can verify that a single
 * invocation of this function doesn't attempt to use more than
 * RPC_MAX_AUTH_SIZE.
 */

int
xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
{
	u8 *p;

	if (shiftlen == 0)
		return 0;

	BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
	BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);

	p = buf->head[0].iov_base + base;

	memmove(p + shiftlen, p, buf->head[0].iov_len - base);

	buf->head[0].iov_len += shiftlen;
	buf->len += shiftlen;

	return 0;
}
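
/*
 * Editorial illustration -- not part of the upstream file.  The shift
 * performed above, as used by gss_krb5_aes_encrypt() below with base
 * pointing just past the GSS token header:
 *
 *	before:	head[0] = [ rpc | token hdr | data ... ]
 *	after:	head[0] = [ rpc | token hdr | <shiftlen open octets> | data ... ]
 *
 * The freshly opened octets are uninitialized; the caller immediately
 * overwrites them with a random confounder.
 */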

static u32
gss_krb5_cts_crypt(struct crypto_sync_skcipher *cipher, struct xdr_buf *buf,
		   u32 offset, u8 *iv, struct page **pages, int encrypt)
{
	u32 ret;
	struct scatterlist sg[1];
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, cipher);
	u8 *data;
	struct page **save_pages;
	u32 len = buf->len - offset;

	if (len > GSS_KRB5_MAX_BLOCKSIZE * 2) {
		WARN_ON(1);	/* the CTS tail must fit in two blocks */
		return -ENOMEM;
	}
	data = kmalloc(GSS_KRB5_MAX_BLOCKSIZE * 2, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/*
	 * For encryption, we want to read from the cleartext
	 * page cache pages, and write the encrypted data to
	 * the supplied xdr_buf pages.
	 */
	save_pages = buf->pages;
	if (encrypt)
		buf->pages = pages;

	ret = read_bytes_from_xdr_buf(buf, offset, data, len);
	buf->pages = save_pages;
	if (ret)
		goto out;

	sg_init_one(sg, data, len);

	skcipher_request_set_sync_tfm(req, cipher);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, len, iv);

	if (encrypt)
		ret = crypto_skcipher_encrypt(req);
	else
		ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);

	if (ret)
		goto out;

	ret = write_bytes_to_xdr_buf(buf, offset, data, len);

out:
	kfree(data);
	return ret;
}
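
/*
 * Editorial note -- not part of the upstream file.  In this file the
 * @cipher passed here is the context's "cts(cbc(aes))" transform, so
 * the final one or two blocks of a message get CBC with ciphertext
 * stealing, while the bulk of the data was already handled by the
 * plain "cbc(aes)" aux cipher; @iv carries the CBC chaining value
 * across the two transforms.
 */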

u32
gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, struct page **pages)
{
	u32 err;
	struct xdr_netobj hmac;
	u8 *cksumkey;
	u8 *ecptr;
	struct crypto_sync_skcipher *cipher, *aux_cipher;
	int blocksize;
	struct page **save_pages;
	int nblocks, nbytes;
	struct encryptor_desc desc;
	u32 cbcbytes;
	unsigned int usage;

	if (kctx->initiate) {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		cksumkey = kctx->initiator_integ;
		usage = KG_USAGE_INITIATOR_SEAL;
	} else {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		cksumkey = kctx->acceptor_integ;
		usage = KG_USAGE_ACCEPTOR_SEAL;
	}
	blocksize = crypto_sync_skcipher_blocksize(cipher);

	/* hide the gss token header and insert the confounder */
	offset += GSS_KRB5_TOK_HDR_LEN;
	if (xdr_extend_head(buf, offset, kctx->gk5e->conflen))
		return GSS_S_FAILURE;
	gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen);
	offset -= GSS_KRB5_TOK_HDR_LEN;

	if (buf->tail[0].iov_base != NULL) {
		ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
	} else {
		buf->tail[0].iov_base = buf->head[0].iov_base
							+ buf->head[0].iov_len;
		buf->tail[0].iov_len = 0;
		ecptr = buf->tail[0].iov_base;
	}

	/* copy plaintext gss token header after filler (if any) */
	memcpy(ecptr, buf->head[0].iov_base + offset, GSS_KRB5_TOK_HDR_LEN);
	buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
	buf->len += GSS_KRB5_TOK_HDR_LEN;

	/* Do the HMAC */
	hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
	hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;

	/*
	 * When we are called, pages points to the real page cache
	 * data -- which we can't go and encrypt!  buf->pages points
	 * to scratch pages which we are going to send off to the
	 * client/server.  Swap in the plaintext pages to calculate
	 * the hmac.
	 */
	save_pages = buf->pages;
	buf->pages = pages;

	err = make_checksum_v2(kctx, NULL, 0, buf,
			       offset + GSS_KRB5_TOK_HDR_LEN,
			       cksumkey, usage, &hmac);
	buf->pages = save_pages;
	if (err)
		return GSS_S_FAILURE;

	nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
	nblocks = (nbytes + blocksize - 1) / blocksize;
	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	if (cbcbytes) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);

		desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
		desc.fragno = 0;
		desc.fraglen = 0;
		desc.pages = pages;
		desc.outbuf = buf;
		desc.req = req;

		skcipher_request_set_sync_tfm(req, aux_cipher);
		skcipher_request_set_callback(req, 0, NULL, NULL);

		sg_init_table(desc.infrags, 4);
		sg_init_table(desc.outfrags, 4);

		err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
				      cbcbytes, encryptor, &desc);
		skcipher_request_zero(req);
		if (err)
			goto out_err;
	}

	/* Make sure IV carries forward from any CBC results. */
	err = gss_krb5_cts_crypt(cipher, buf,
				 offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
				 desc.iv, pages, 1);
	if (err) {
		err = GSS_S_FAILURE;
		goto out_err;
	}

	/* Now update buf to account for HMAC */
	buf->tail[0].iov_len += kctx->gk5e->cksumlength;
	buf->len += kctx->gk5e->cksumlength;

out_err:
	if (err)
		err = GSS_S_FAILURE;
	return err;
}
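
/*
 * Editorial illustration -- not part of the upstream file.  The RFC 4121
 * wrap token assembled above:
 *
 *	[ 16-octet token header | E(confounder | data | copy of header) | HMAC ]
 *
 * where the HMAC is computed over the plaintext confounder, data and
 * header copy, and then carried unencrypted in the tail.
 */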

u32
gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
		     struct xdr_buf *buf, u32 *headskip, u32 *tailskip)
{
	struct xdr_buf subbuf;
	u32 ret = 0;
	u8 *cksum_key;
	struct crypto_sync_skcipher *cipher, *aux_cipher;
	struct xdr_netobj our_hmac_obj;
	u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	int nblocks, blocksize, cbcbytes;
	struct decryptor_desc desc;
	unsigned int usage;

	if (kctx->initiate) {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		cksum_key = kctx->acceptor_integ;
		usage = KG_USAGE_ACCEPTOR_SEAL;
	} else {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		cksum_key = kctx->initiator_integ;
		usage = KG_USAGE_INITIATOR_SEAL;
	}
	blocksize = crypto_sync_skcipher_blocksize(cipher);

	/* create a segment skipping the header and leaving out the checksum */
	xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
				    (len - offset - GSS_KRB5_TOK_HDR_LEN -
				     kctx->gk5e->cksumlength));

	nblocks = (subbuf.len + blocksize - 1) / blocksize;

	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	if (cbcbytes) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);

		desc.fragno = 0;
		desc.fraglen = 0;
		desc.req = req;

		skcipher_request_set_sync_tfm(req, aux_cipher);
		skcipher_request_set_callback(req, 0, NULL, NULL);

		sg_init_table(desc.frags, 4);

		ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
		skcipher_request_zero(req);
		if (ret)
			goto out_err;
	}

	/* Make sure IV carries forward from any CBC results. */
	ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
	if (ret)
		goto out_err;

	/* Calculate our hmac over the plaintext data */
	our_hmac_obj.len = sizeof(our_hmac);
	our_hmac_obj.data = our_hmac;

	ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
			       cksum_key, usage, &our_hmac_obj);
	if (ret)
		goto out_err;

	/* Get the packet's hmac value */
	ret = read_bytes_from_xdr_buf(buf, len - kctx->gk5e->cksumlength,
				      pkt_hmac, kctx->gk5e->cksumlength);
	if (ret)
		goto out_err;

	if (crypto_memneq(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
		ret = GSS_S_BAD_SIG;
		goto out_err;
	}
	*headskip = kctx->gk5e->conflen;
	*tailskip = kctx->gk5e->cksumlength;
out_err:
	if (ret && ret != GSS_S_BAD_SIG)
		ret = GSS_S_FAILURE;
	return ret;
}