/*
 *  linux/net/sunrpc/gss_krb5_crypto.c
 *
 *  Copyright (c) 2000-2008 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson   <andros@umich.edu>
 *  Bruce Fields   <bfields@umich.edu>
 */

/*
 * Copyright (C) 1998 by the FundsXpress, INC.
 *
 * All rights reserved.
 *
 * Export of this software from the United States of America may require
 * a specific license from the United States Government.  It is the
 * responsibility of any person or organization contemplating export to
 * obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of FundsXpress. not be used in advertising or publicity pertaining
 * to distribution of the software without specific, written prior
 * permission.  FundsXpress makes no representations about the suitability of
 * this software for any purpose.  It is provided "as is" without express
 * or implied warranty.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY        RPCDBG_AUTH
#endif

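/*
 * krb5_encrypt - simple CBC encryption helper
 *
 * Encrypts @length bytes from @in into @out with @tfm, using @iv as the
 * initialization vector (an all-zero IV when @iv is NULL).  @length must
 * be a multiple of the cipher block size.  Returns 0 on success,
 * non-zero on failure.
 */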
u32
krb5_encrypt(
	struct crypto_blkcipher *tfm,
	void * iv,
	void * in,
	void * out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };

	if (length % crypto_blkcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC:       gss_k5encrypt: tfm iv size too large %d\n",
			crypto_blkcipher_ivsize(tfm));
		goto out;
	}

	if (iv)
		memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
out:
	dprintk("RPC:       krb5_encrypt returns %d\n", ret);
	return ret;
}

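/*
 * krb5_decrypt - CBC decryption counterpart of krb5_encrypt()
 *
 * Decrypts @length bytes from @in into @out with @tfm, using @iv (or an
 * all-zero IV when @iv is NULL).  @length must be a multiple of the
 * cipher block size.  Returns 0 on success, non-zero on failure.
 */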
u32
krb5_decrypt(
     struct crypto_blkcipher *tfm,
     void * iv,
     void * in,
     void * out,
     int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };

	if (length % crypto_blkcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC:       gss_k5decrypt: tfm iv size too large %d\n",
			crypto_blkcipher_ivsize(tfm));
		goto out;
	}
	if (iv)
		memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
out:
	dprintk("RPC:       gss_k5decrypt returns %d\n", ret);
	return ret;
}

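/*
 * checksummer - xdr_process_buf() callback that feeds each scatterlist
 * fragment of an xdr_buf into the hash transform carried in @data.
 */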
static int
checksummer(struct scatterlist *sg, void *data)
{
	struct hash_desc *desc = data;

	return crypto_hash_update(desc, sg, sg->length);
}

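/*
 * Map a GSS-API key usage value onto the 4-byte little-endian salt that
 * the arcfour-hmac-md5 checksum mixes into its MD5 hash: 15 for signing,
 * 13 for sealing; any other usage is rejected.
 */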
static int
arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4])
{
	unsigned int ms_usage;

	switch (usage) {
	case KG_USAGE_SIGN:
		ms_usage = 15;
		break;
	case KG_USAGE_SEAL:
		ms_usage = 13;
		break;
	default:
		return -EINVAL;
	}
	salt[0] = (ms_usage >> 0) & 0xff;
	salt[1] = (ms_usage >> 8) & 0xff;
	salt[2] = (ms_usage >> 16) & 0xff;
	salt[3] = (ms_usage >> 24) & 0xff;

	return 0;
}

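/*
 * make_checksum_hmac_md5 - arcfour-hmac-md5 (RC4-HMAC) checksum
 *
 * Computes MD5(salt | header | body) and then HMACs that digest with
 * @cksumkey, truncating the result to the enctype's checksum length.
 * The salt is derived from @usage by arcfour_hmac_md5_usage_to_salt().
 */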
static u32
make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
		       struct xdr_buf *body, int body_offset, u8 *cksumkey,
		       unsigned int usage, struct xdr_netobj *cksumout)
{
	struct hash_desc                desc;
	struct scatterlist              sg[1];
	int err;
	u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	u8 rc4salt[4];
	struct crypto_hash *md5;
	struct crypto_hash *hmac_md5;

	if (cksumkey == NULL)
		return GSS_S_FAILURE;

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
		dprintk("%s: invalid usage value %u\n", __func__, usage);
		return GSS_S_FAILURE;
	}

	md5 = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(md5))
		return GSS_S_FAILURE;

	hmac_md5 = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
				     CRYPTO_ALG_ASYNC);
	if (IS_ERR(hmac_md5)) {
		crypto_free_hash(md5);
		return GSS_S_FAILURE;
	}

	desc.tfm = md5;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_init(&desc);
	if (err)
		goto out;
	sg_init_one(sg, rc4salt, 4);
	err = crypto_hash_update(&desc, sg, 4);
	if (err)
		goto out;

	sg_init_one(sg, header, hdrlen);
	err = crypto_hash_update(&desc, sg, hdrlen);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, &desc);
	if (err)
		goto out;
	err = crypto_hash_final(&desc, checksumdata);
	if (err)
		goto out;

	desc.tfm = hmac_md5;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_init(&desc);
	if (err)
		goto out;
	err = crypto_hash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
	if (err)
		goto out;

	sg_init_one(sg, checksumdata, crypto_hash_digestsize(md5));
	err = crypto_hash_digest(&desc, sg, crypto_hash_digestsize(md5),
				 checksumdata);
	if (err)
		goto out;

	memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
	cksumout->len = kctx->gk5e->cksumlength;
out:
	crypto_free_hash(md5);
	crypto_free_hash(hmac_md5);
	return err ? GSS_S_FAILURE : 0;
}

/*
 * checksum the plaintext data and hdrlen bytes of the token header
 * The checksum is performed over the first 8 bytes of the
 * gss token header and then over the data body
 */
u32
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
	      struct xdr_buf *body, int body_offset, u8 *cksumkey,
	      unsigned int usage, struct xdr_netobj *cksumout)
{
	struct hash_desc                desc;
	struct scatterlist              sg[1];
	int err;
	u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	unsigned int checksumlen;

	if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
		return make_checksum_hmac_md5(kctx, header, hdrlen,
					      body, body_offset,
					      cksumkey, usage, cksumout);

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return GSS_S_FAILURE;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	checksumlen = crypto_hash_digestsize(desc.tfm);

	if (cksumkey != NULL) {
		err = crypto_hash_setkey(desc.tfm, cksumkey,
					 kctx->gk5e->keylength);
		if (err)
			goto out;
	}

	err = crypto_hash_init(&desc);
	if (err)
		goto out;
	sg_init_one(sg, header, hdrlen);
	err = crypto_hash_update(&desc, sg, hdrlen);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, &desc);
	if (err)
		goto out;
	err = crypto_hash_final(&desc, checksumdata);
	if (err)
		goto out;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_RSA_MD5:
		err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
					  checksumdata, checksumlen);
		if (err)
			goto out;
		memcpy(cksumout->data,
		       checksumdata + checksumlen - kctx->gk5e->cksumlength,
		       kctx->gk5e->cksumlength);
		break;
	case CKSUMTYPE_HMAC_SHA1_DES3:
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
	cksumout->len = kctx->gk5e->cksumlength;
out:
	crypto_free_hash(desc.tfm);
	return err ? GSS_S_FAILURE : 0;
}

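/*
 * Illustrative sketch (not compiled here): a MIC/seal path would
 * typically call make_checksum() roughly like this, where "ptr" points
 * at the 8-byte token header and "text" is the xdr_buf being protected
 * (the surrounding names are hypothetical):
 *
 *	u8 cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
 *	struct xdr_netobj md5cksum = { .len = sizeof(cksumdata),
 *				       .data = cksumdata };
 *
 *	if (make_checksum(kctx, ptr, 8, text, 0, NULL,
 *			  KG_USAGE_SIGN, &md5cksum))
 *		return GSS_S_FAILURE;
 */
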
/*
 * checksum the plaintext data and hdrlen bytes of the token header
 * Per rfc4121, sec. 4.2.4, the checksum is performed over the data
 * body then over the first 16 octets of the MIC token
 * Inclusion of the header data in the calculation of the
 * checksum is optional.
 */
u32
make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
		 struct xdr_buf *body, int body_offset, u8 *cksumkey,
		 unsigned int usage, struct xdr_netobj *cksumout)
{
	struct hash_desc desc;
	struct scatterlist sg[1];
	int err;
	u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	unsigned int checksumlen;

	if (kctx->gk5e->keyed_cksum == 0) {
		dprintk("%s: expected keyed hash for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}
	if (cksumkey == NULL) {
		dprintk("%s: no key supplied for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
							CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return GSS_S_FAILURE;
	checksumlen = crypto_hash_digestsize(desc.tfm);
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_setkey(desc.tfm, cksumkey, kctx->gk5e->keylength);
	if (err)
		goto out;

	err = crypto_hash_init(&desc);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, &desc);
	if (err)
		goto out;
	if (header != NULL) {
		sg_init_one(sg, header, hdrlen);
		err = crypto_hash_update(&desc, sg, hdrlen);
		if (err)
			goto out;
	}
	err = crypto_hash_final(&desc, checksumdata);
	if (err)
		goto out;

	cksumout->len = kctx->gk5e->cksumlength;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_HMAC_SHA1_96_AES128:
	case CKSUMTYPE_HMAC_SHA1_96_AES256:
		/* note that this truncates the hash */
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
out:
	crypto_free_hash(desc.tfm);
	return err ? GSS_S_FAILURE : 0;
}

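/*
 * State shared between gss_encrypt_xdr_buf() and its per-fragment
 * callback, encryptor().  The callback batches up to four scatterlist
 * fragments (head, two page pieces, tail), reads cleartext from the
 * caller-supplied pages where applicable, writes ciphertext into the
 * xdr_buf's own pages, and carries any sub-block remainder over to the
 * next call so the CBC stream stays contiguous.
 */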
struct encryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct blkcipher_desc desc;
	int pos;
	struct xdr_buf *outbuf;
	struct page **pages;
	struct scatterlist infrags[4];
	struct scatterlist outfrags[4];
	int fragno;
	int fraglen;
};

static int
encryptor(struct scatterlist *sg, void *data)
{
	struct encryptor_desc *desc = data;
	struct xdr_buf *outbuf = desc->outbuf;
	struct page *in_page;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;
	int page_pos;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);

	page_pos = desc->pos - outbuf->head[0].iov_len;
	if (page_pos >= 0 && page_pos < outbuf->page_len) {
		/* pages are not in place: */
		int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
		in_page = desc->pages[i];
	} else {
		in_page = sg_page(sg);
	}
	sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
		    sg->offset);
	sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;
	desc->pos += sg->length;

	fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->infrags[desc->fragno - 1]);
	sg_mark_end(&desc->outfrags[desc->fragno - 1]);

	ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
					  desc->infrags, thislen);
	if (ret)
		return ret;

	sg_init_table(desc->infrags, 4);
	sg_init_table(desc->outfrags, 4);

	if (fraglen) {
		sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
				sg->offset + sg->length - fraglen);
		desc->infrags[0] = desc->outfrags[0];
		sg_assign_page(&desc->infrags[0], in_page);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

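/*
 * gss_encrypt_xdr_buf - CBC-encrypt an xdr_buf from @offset to its end
 *
 * The region must be a whole number of cipher blocks.  @pages supplies
 * the cleartext page cache pages to read from, while the ciphertext
 * lands in @buf's own pages.  Returns 0 or a crypto-layer error.
 */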
int
gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
		    int offset, struct page **pages)
{
	int ret;
	struct encryptor_desc desc;

	BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.desc.tfm = tfm;
	desc.desc.info = desc.iv;
	desc.desc.flags = 0;
	desc.pos = offset;
	desc.outbuf = buf;
	desc.pages = pages;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.infrags, 4);
	sg_init_table(desc.outfrags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
	return ret;
}

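/*
 * Per-fragment state for gss_decrypt_xdr_buf(): decryptor() mirrors
 * encryptor() but works strictly in place, so a single fragment list
 * is enough.
 */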
struct decryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct blkcipher_desc desc;
	struct scatterlist frags[4];
	int fragno;
	int fraglen;
};

static int
decryptor(struct scatterlist *sg, void *data)
{
	struct decryptor_desc *desc = data;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);
	sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;

	fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->frags[desc->fragno - 1]);

	ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
					  desc->frags, thislen);
	if (ret)
		return ret;

	sg_init_table(desc->frags, 4);

	if (fraglen) {
		sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
				sg->offset + sg->length - fraglen);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

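/*
 * gss_decrypt_xdr_buf - in-place CBC decryption of an xdr_buf
 *
 * Decrypts @buf from @offset to its end with @tfm; as with encryption,
 * the region must be a whole number of cipher blocks.
 */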
int
gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
		    int offset)
{
	struct decryptor_desc desc;

	/* XXXJBF: */
	BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.desc.tfm = tfm;
	desc.desc.info = desc.iv;
	desc.desc.flags = 0;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.frags, 4);

	return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
}

/*
 * This function makes the assumption that it was ultimately called
 * from gss_wrap().
 *
 * The client auth_gss code moves any existing tail data into a
 * separate page before calling gss_wrap.
 * The server svcauth_gss code ensures that both the head and the
 * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
 *
 * Even with that guarantee, this function may be called more than
 * once in the processing of gss_wrap().  The best we can do is
 * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
 * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
 * At run-time we can verify that a single invocation of this
 * function doesn't attempt to use more than RPC_MAX_AUTH_SIZE.
 */

int
xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
{
	u8 *p;

	if (shiftlen == 0)
		return 0;

	BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
	BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);

	p = buf->head[0].iov_base + base;

	memmove(p + shiftlen, p, buf->head[0].iov_len - base);

	buf->head[0].iov_len += shiftlen;
	buf->len += shiftlen;

	return 0;
}

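/*
 * Run @cipher over the final region of @buf (at most two cipher blocks'
 * worth, i.e. the part handled with ciphertext stealing) starting at
 * @offset, chaining from @iv.  The bytes are pulled out of the xdr_buf
 * into a flat buffer, transformed, and written back.  For encryption
 * the cleartext is read from @pages rather than from @buf's own pages;
 * the cipher itself is expected to implement the CTS mode.
 */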
static u32
gss_krb5_cts_crypt(struct crypto_blkcipher *cipher, struct xdr_buf *buf,
		   u32 offset, u8 *iv, struct page **pages, int encrypt)
{
	u32 ret;
	struct scatterlist sg[1];
	struct blkcipher_desc desc = { .tfm = cipher, .info = iv };
	u8 data[GSS_KRB5_MAX_BLOCKSIZE * 2];
	struct page **save_pages;
	u32 len = buf->len - offset;

	if (len > ARRAY_SIZE(data)) {
		WARN_ON(0);
		return -ENOMEM;
	}

	/*
	 * For encryption, we want to read from the cleartext
	 * page cache pages, and write the encrypted data to
	 * the supplied xdr_buf pages.
	 */
	save_pages = buf->pages;
	if (encrypt)
		buf->pages = pages;

	ret = read_bytes_from_xdr_buf(buf, offset, data, len);
	buf->pages = save_pages;
	if (ret)
		goto out;

	sg_init_one(sg, data, len);

	if (encrypt)
		ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
	else
		ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, len);

	if (ret)
		goto out;

	ret = write_bytes_to_xdr_buf(buf, offset, data, len);

out:
	return ret;
}

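/*
 * gss_krb5_aes_encrypt - RFC 4121 wrap for the v2 (AES) enctypes
 *
 * Lays out the wrap token around the plaintext in @buf: a confounder is
 * inserted after the token header, @ec filler bytes and a copy of the
 * header are appended to the tail, an HMAC is computed over the
 * plaintext (read from the cleartext @pages), all but the last two
 * cipher blocks are CBC-encrypted with the auxiliary cipher, the
 * remainder is handled by gss_krb5_cts_crypt(), and the checksum is
 * finally accounted for in the buffer length.
 */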
u32
gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, int ec, struct page **pages)
{
	u32 err;
	struct xdr_netobj hmac;
	u8 *cksumkey;
	u8 *ecptr;
	struct crypto_blkcipher *cipher, *aux_cipher;
	int blocksize;
	struct page **save_pages;
	int nblocks, nbytes;
	struct encryptor_desc desc;
	u32 cbcbytes;
	unsigned int usage;

	if (kctx->initiate) {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		cksumkey = kctx->initiator_integ;
		usage = KG_USAGE_INITIATOR_SEAL;
	} else {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		cksumkey = kctx->acceptor_integ;
		usage = KG_USAGE_ACCEPTOR_SEAL;
	}
	blocksize = crypto_blkcipher_blocksize(cipher);

	/* hide the gss token header and insert the confounder */
	offset += GSS_KRB5_TOK_HDR_LEN;
	if (xdr_extend_head(buf, offset, kctx->gk5e->conflen))
		return GSS_S_FAILURE;
	gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen);
	offset -= GSS_KRB5_TOK_HDR_LEN;

	if (buf->tail[0].iov_base != NULL) {
		ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
	} else {
		buf->tail[0].iov_base = buf->head[0].iov_base
							+ buf->head[0].iov_len;
		buf->tail[0].iov_len = 0;
		ecptr = buf->tail[0].iov_base;
	}

	memset(ecptr, 'X', ec);
	buf->tail[0].iov_len += ec;
	buf->len += ec;

	/* copy plaintext gss token header after filler (if any) */
	memcpy(ecptr + ec, buf->head[0].iov_base + offset,
						GSS_KRB5_TOK_HDR_LEN);
	buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
	buf->len += GSS_KRB5_TOK_HDR_LEN;

	/* Do the HMAC */
	hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
	hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;

	/*
	 * When we are called, pages points to the real page cache
	 * data -- which we can't go and encrypt!  buf->pages points
	 * to scratch pages which we are going to send off to the
	 * client/server.  Swap in the plaintext pages to calculate
	 * the hmac.
	 */
	save_pages = buf->pages;
	buf->pages = pages;

	err = make_checksum_v2(kctx, NULL, 0, buf,
			       offset + GSS_KRB5_TOK_HDR_LEN,
			       cksumkey, usage, &hmac);
	buf->pages = save_pages;
	if (err)
		return GSS_S_FAILURE;

	nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
	nblocks = (nbytes + blocksize - 1) / blocksize;
	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	if (cbcbytes) {
		desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
		desc.fragno = 0;
		desc.fraglen = 0;
		desc.pages = pages;
		desc.outbuf = buf;
		desc.desc.info = desc.iv;
		desc.desc.flags = 0;
		desc.desc.tfm = aux_cipher;

		sg_init_table(desc.infrags, 4);
		sg_init_table(desc.outfrags, 4);

		err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
				      cbcbytes, encryptor, &desc);
		if (err)
			goto out_err;
	}

	/* Make sure IV carries forward from any CBC results. */
	err = gss_krb5_cts_crypt(cipher, buf,
				 offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
				 desc.iv, pages, 1);
	if (err) {
		err = GSS_S_FAILURE;
		goto out_err;
	}

	/* Now update buf to account for HMAC */
	buf->tail[0].iov_len += kctx->gk5e->cksumlength;
	buf->len += kctx->gk5e->cksumlength;

out_err:
	if (err)
		err = GSS_S_FAILURE;
	return err;
}

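/*
 * gss_krb5_aes_decrypt - RFC 4121 unwrap counterpart of the above
 *
 * Decrypts the ciphertext region of @buf in place (CBC for all but the
 * last two blocks, then gss_krb5_cts_crypt() for the remainder),
 * recomputes the HMAC over the recovered plaintext and compares it with
 * the checksum carried at the end of the packet.  On success @headskip
 * and @tailskip tell the caller how many confounder and checksum bytes
 * to strip.
 */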
u32
gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
		     u32 *headskip, u32 *tailskip)
{
	struct xdr_buf subbuf;
	u32 ret = 0;
	u8 *cksum_key;
	struct crypto_blkcipher *cipher, *aux_cipher;
	struct xdr_netobj our_hmac_obj;
	u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	int nblocks, blocksize, cbcbytes;
	struct decryptor_desc desc;
	unsigned int usage;

	if (kctx->initiate) {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		cksum_key = kctx->acceptor_integ;
		usage = KG_USAGE_ACCEPTOR_SEAL;
	} else {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		cksum_key = kctx->initiator_integ;
		usage = KG_USAGE_INITIATOR_SEAL;
	}
	blocksize = crypto_blkcipher_blocksize(cipher);


	/* create a segment skipping the header and leaving out the checksum */
	xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
				    (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
				     kctx->gk5e->cksumlength));

	nblocks = (subbuf.len + blocksize - 1) / blocksize;

	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	if (cbcbytes) {
		desc.fragno = 0;
		desc.fraglen = 0;
		desc.desc.info = desc.iv;
		desc.desc.flags = 0;
		desc.desc.tfm = aux_cipher;

		sg_init_table(desc.frags, 4);

		ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
		if (ret)
			goto out_err;
	}

	/* Make sure IV carries forward from any CBC results. */
	ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
	if (ret)
		goto out_err;


	/* Calculate our hmac over the plaintext data */
	our_hmac_obj.len = sizeof(our_hmac);
	our_hmac_obj.data = our_hmac;

	ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
			       cksum_key, usage, &our_hmac_obj);
	if (ret)
		goto out_err;

	/* Get the packet's hmac value */
	ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
				      pkt_hmac, kctx->gk5e->cksumlength);
	if (ret)
		goto out_err;

	if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
		ret = GSS_S_BAD_SIG;
		goto out_err;
	}
	*headskip = kctx->gk5e->conflen;
	*tailskip = kctx->gk5e->cksumlength;
out_err:
	if (ret && ret != GSS_S_BAD_SIG)
		ret = GSS_S_FAILURE;
	return ret;
}

/*
 * Compute Kseq given the initial session key and the checksum.
 * Set the key of the given cipher.
 */
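/*
 * Following the code below, the derivation is effectively
 *	Kseq = H(H(Ksess, 0x00000000), checksum[0..7])
 * where H is the enctype's keyed hash (HMAC-MD5 for the arcfour
 * enctype), with the result installed as the key of @cipher.
 */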
int
krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
		       unsigned char *cksum)
{
	struct crypto_hash *hmac;
	struct hash_desc desc;
	struct scatterlist sg[1];
	u8 Kseq[GSS_KRB5_MAX_KEYLEN];
	u32 zeroconstant = 0;
	int err;

	dprintk("%s: entered\n", __func__);

	hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hmac)) {
		dprintk("%s: error %ld, allocating hash '%s'\n",
			__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
		return PTR_ERR(hmac);
	}

	desc.tfm = hmac;
	desc.flags = 0;

	err = crypto_hash_init(&desc);
	if (err)
		goto out_err;

	/* Compute intermediate Kseq from session key */
	err = crypto_hash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	sg_init_table(sg, 1);
	sg_set_buf(sg, &zeroconstant, 4);

	err = crypto_hash_digest(&desc, sg, 4, Kseq);
	if (err)
		goto out_err;

	/* Compute final Kseq from the checksum and intermediate Kseq */
	err = crypto_hash_setkey(hmac, Kseq, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	sg_set_buf(sg, cksum, 8);

	err = crypto_hash_digest(&desc, sg, 8, Kseq);
	if (err)
		goto out_err;

	err = crypto_blkcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = 0;

out_err:
	crypto_free_hash(hmac);
	dprintk("%s: returning %d\n", __func__, err);
	return err;
}

/*
 * Compute Kcrypt given the initial session key and the plaintext seqnum.
 * Set the key of cipher kctx->enc.
 */
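/*
 * Following the code below, the derivation is effectively
 *	Kcrypt = H(H(Ksess ^ 0xf0f0..., 0x00000000), seqnum_be32)
 * where H is the enctype's keyed hash (HMAC-MD5 for the arcfour
 * enctype) and seqnum_be32 is the sequence number as four big-endian
 * bytes.
 */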
int
krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
		       s32 seqnum)
{
	struct crypto_hash *hmac;
	struct hash_desc desc;
	struct scatterlist sg[1];
	u8 Kcrypt[GSS_KRB5_MAX_KEYLEN];
	u8 zeroconstant[4] = {0};
	u8 seqnumarray[4];
	int err, i;

	dprintk("%s: entered, seqnum %u\n", __func__, seqnum);

	hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hmac)) {
		dprintk("%s: error %ld, allocating hash '%s'\n",
			__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
		return PTR_ERR(hmac);
	}

	desc.tfm = hmac;
	desc.flags = 0;

	err = crypto_hash_init(&desc);
	if (err)
		goto out_err;

	/* Compute intermediate Kcrypt from session key */
	for (i = 0; i < kctx->gk5e->keylength; i++)
		Kcrypt[i] = kctx->Ksess[i] ^ 0xf0;

	err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	sg_init_table(sg, 1);
	sg_set_buf(sg, zeroconstant, 4);

	err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
	if (err)
		goto out_err;

	/* Compute final Kcrypt from the seqnum and intermediate Kcrypt */
	err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff);
	seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff);
	seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff);
	seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff);

	sg_set_buf(sg, seqnumarray, 4);

	err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
	if (err)
		goto out_err;

	err = crypto_blkcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = 0;

out_err:
	crypto_free_hash(hmac);
	dprintk("%s: returning %d\n", __func__, err);
	return err;
}
