/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = cpu_to_be32(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len  = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);
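
/*
 * Illustrative sketch (not part of the original file): a minimal round
 * trip through the two netobj helpers above. The buffer size, sample
 * data and function name are hypothetical.
 */
static int example_netobj_roundtrip(void)
{
	static u8 data[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	struct xdr_netobj in = { .len = sizeof(data), .data = data };
	struct xdr_netobj out;
	__be32 buf[4];	/* one length word + two data words + slack */

	xdr_encode_netobj(buf, &in);
	if (xdr_decode_netobj(buf, &out) == NULL)
		return -EINVAL;	/* length exceeded XDR_MAX_NETOBJ */
	return out.len == in.len ? 0 : -EINVAL;
}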

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 *
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = cpu_to_be32(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);

__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);
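
/*
 * Illustrative sketch (not part of the original file): encoding the
 * 5-byte string "hello" emits one length word plus two data words,
 * the second padded with three zero bytes (RFC 1832). The helper
 * name is hypothetical.
 */
static __be32 *example_encode_hello(__be32 *p)
{
	/* on the wire: 00000005 68656c6c 6f000000 */
	return xdr_encode_string(p, "hello");
}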

__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len;

	len = be32_to_cpu(*p++);
	if (len > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);

/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 *
 */
void
xdr_terminate_string(struct xdr_buf *buf, const u32 len)
{
	char *kaddr;

	kaddr = kmap_atomic(buf->pages[0]);
	kaddr[buf->page_base + len] = '\0';
	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);

void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len  = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}
EXPORT_SYMBOL_GPL(xdr_encode_pages);
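
/*
 * Illustrative sketch (not part of the original file): attaching a
 * 10-byte page payload to a send buffer. Since 10 & 3 == 2, the tail
 * kvec picks up two zero pad bytes and 12 bytes are added to the
 * buffer lengths. Names are hypothetical.
 */
static void example_attach_pages(struct xdr_buf *sndbuf, struct page **pages)
{
	xdr_encode_pages(sndbuf, pages, 0, 10);
}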

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len  = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);

/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 * 	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto);
		vfrom = kmap_atomic(*pgfrom);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vfrom);
		kunmap_atomic(vto);

	} while ((len -= copy) != 0);
}

/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	for (;;) {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	flush_dcache_page(*pgto);
}

/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);

/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON(len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	unsigned int pglen = buf->page_len;
	unsigned int tailbuf_len;

	tail = buf->tail;
	BUG_ON(len > pglen);

	tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;

	/* Shift the tail first */
	if (tailbuf_len != 0) {
		unsigned int free_space = tailbuf_len - tail->iov_len;

		if (len < free_space)
			free_space = len;
		tail->iov_len += free_space;

		copy = len;
		if (tail->iov_len > len) {
			char *p = (char *)tail->iov_base + len;
			memmove(p, tail->iov_base, tail->iov_len - len);
		} else
			copy = tail->iov_len;
		/* Copy from the inlined pages into the tail */
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_encode);

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
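
/*
 * Illustrative sketch (not part of the original file): the usual encode
 * pattern - reserve word-aligned space, then fill it in. Assumes the
 * stream was set up with xdr_init_encode(); the field values and the
 * function name are hypothetical.
 */
static int example_encode_two_words(struct xdr_stream *xdr)
{
	__be32 *p = xdr_reserve_space(xdr, 2 * sizeof(__be32));

	if (p == NULL)
		return -EMSGSIZE;	/* would run past xdr->end */
	*p++ = cpu_to_be32(42);		/* first field */
	*p = cpu_to_be32(7);		/* second field */
	return 0;
}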

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len  = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len  += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);

static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
		__be32 *p, unsigned int len)
{
	if (len > iov->iov_len)
		len = iov->iov_len;
	if (p == NULL)
		p = (__be32*)iov->iov_base;
	xdr->p = p;
	xdr->end = (__be32*)(iov->iov_base + len);
	xdr->iov = iov;
	xdr->page_ptr = NULL;
}

static int xdr_set_page_base(struct xdr_stream *xdr,
		unsigned int base, unsigned int len)
{
	unsigned int pgnr;
	unsigned int maxlen;
	unsigned int pgoff;
	unsigned int pgend;
	void *kaddr;

	maxlen = xdr->buf->page_len;
	if (base >= maxlen)
		return -EINVAL;
	maxlen -= base;
	if (len > maxlen)
		len = maxlen;

	base += xdr->buf->page_base;

	pgnr = base >> PAGE_SHIFT;
	xdr->page_ptr = &xdr->buf->pages[pgnr];
	kaddr = page_address(*xdr->page_ptr);

	pgoff = base & ~PAGE_MASK;
	xdr->p = (__be32*)(kaddr + pgoff);

	pgend = pgoff + len;
	if (pgend > PAGE_SIZE)
		pgend = PAGE_SIZE;
	xdr->end = (__be32*)(kaddr + pgend);
	xdr->iov = NULL;
	return 0;
}

static void xdr_set_next_page(struct xdr_stream *xdr)
{
	unsigned int newbase;

	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
	newbase -= xdr->buf->page_base;

	if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
		xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
}

static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
	if (xdr->page_ptr != NULL)
		xdr_set_next_page(xdr);
	else if (xdr->iov == xdr->buf->head) {
		if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
			xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
	}
	return xdr->p != xdr->end;
}

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	xdr->buf = buf;
	xdr->scratch.iov_base = NULL;
	xdr->scratch.iov_len = 0;
	if (buf->head[0].iov_len != 0)
		xdr_set_iov(xdr, buf->head, p, buf->len);
	else if (buf->page_len != 0)
		xdr_set_page_base(xdr, 0, buf->len);
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
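
/*
 * Illustrative sketch (not part of the original file): typical decode
 * setup over a received buffer, starting at the first word of the
 * head. The function name is hypothetical.
 */
static u32 example_read_status(struct xdr_buf *rcvbuf)
{
	struct xdr_stream xdr;
	__be32 *p;

	xdr_init_decode(&xdr, rcvbuf, rcvbuf->head[0].iov_base);
	p = xdr_inline_decode(&xdr, sizeof(*p));
	return p != NULL ? be32_to_cpup(p) : 0;
}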

/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @pages: list of pages to decode into
 * @len: length in bytes of buffer in pages
 */
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
			   struct page **pages, unsigned int len)
{
	memset(buf, 0, sizeof(*buf));
	buf->pages = pages;
	buf->page_len = len;
	buf->buflen = len;
	buf->len = len;
	xdr_init_decode(xdr, buf, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);

static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q = p + XDR_QUADLEN(nbytes);

	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	return p;
}

/**
 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to an empty buffer
 * @buflen: size of 'buf'
 *
 * The scratch buffer is used when decoding from an array of pages.
 * If an xdr_inline_decode() call spans across page boundaries, then
 * we copy the data into the scratch buffer in order to allow linear
 * access.
 */
void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
{
	xdr->scratch.iov_base = buf;
	xdr->scratch.iov_len = buflen;
}
EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);

static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;
	void *cpdest = xdr->scratch.iov_base;
	size_t cplen = (char *)xdr->end - (char *)xdr->p;

	if (nbytes > xdr->scratch.iov_len)
		return NULL;
	memcpy(cpdest, xdr->p, cplen);
	cpdest += cplen;
	nbytes -= cplen;
	if (!xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, nbytes);
	return xdr->scratch.iov_base;
}

/**
 * xdr_inline_decode - Retrieve XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;

	if (nbytes == 0)
		return xdr->p;
	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p != NULL)
		return p;
	return xdr_copy_to_scratch(xdr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
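
/*
 * Illustrative sketch (not part of the original file): decoding a
 * fixed-size object that may straddle a page boundary. The on-stack
 * scratch buffer lets xdr_inline_decode() hand back a linear pointer
 * either way. The 8-byte size and the names are hypothetical.
 */
static int example_decode_verifier(struct xdr_stream *xdr, u8 *verf)
{
	u8 scratch[8];
	__be32 *p;

	xdr_set_scratch_buffer(xdr, scratch, sizeof(scratch));
	p = xdr_inline_decode(xdr, sizeof(scratch));
	if (p == NULL)
		return -EIO;
	memcpy(verf, p, sizeof(scratch));
	return 0;
}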

/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	ssize_t shift;
	unsigned int end;
	int padding;

	/* Realign pages to current pointer position */
	iov  = buf->head;
	shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
	if (shift > 0)
		xdr_shrink_bufhead(buf, shift);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length.  */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
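
/*
 * Illustrative sketch (not part of the original file): a READ-style
 * reply decoder pulls the byte count out of the head, then realigns
 * the stream so the payload starts in buf->pages. Names are
 * hypothetical.
 */
static int example_decode_read_reply(struct xdr_stream *xdr, u32 *count)
{
	__be32 *p = xdr_inline_decode(xdr, sizeof(*count));

	if (p == NULL)
		return -EIO;
	*count = be32_to_cpup(p);
	xdr_read_pages(xdr, *count);	/* payload now begins in the pages */
	return 0;
}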

/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	xdr_read_pages(xdr, len);
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);

/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length is out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		subbuf->head[0].iov_base = NULL;
		subbuf->head[0].iov_len = 0;
		base -= buf->head[0].iov_len;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_CACHE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		subbuf->tail[0].iov_base = NULL;
		subbuf->tail[0].iov_len = 0;
		base -= buf->tail[0].iov_len;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);

static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);

static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);

int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32	raw;
	int	status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = be32_to_cpu(raw);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32	raw = cpu_to_be32(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);
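
/*
 * Illustrative sketch (not part of the original file): patching a count
 * field at a known offset after the fact, then reading it back. The
 * function name is hypothetical.
 */
static int example_patch_count(struct xdr_buf *buf, unsigned int offset,
			       u32 count)
{
	u32 check;

	if (xdr_encode_word(buf, offset, count))
		return -EINVAL;
	if (xdr_decode_word(buf, offset, &check))
		return -EINVAL;
	return check == count ? 0 : -EIO;
}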

/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * tail.) */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					    desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
				 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);
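
/*
 * Illustrative sketch (not part of the original file): decoding an XDR
 * array of 4-byte elements with xdr_decode_array2(). The xcode hook is
 * called once per element; everything named example_* is made up.
 */
static int example_elem(struct xdr_array2_desc *desc, void *elem)
{
	/* a real hook would convert *elem into its host representation */
	return 0;
}

static int example_decode_u32_array(struct xdr_buf *buf, unsigned int base)
{
	struct xdr_array2_desc desc = {
		.elem_size	= 4,
		.array_maxlen	= 64,
		.xcode		= example_elem,
	};

	return xdr_decode_array2(buf, base, &desc);
}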

int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist      sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
		thislen = PAGE_CACHE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_CACHE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
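
/*
 * Illustrative sketch (not part of the original file): a trivial actor
 * for xdr_process_buf() that just totals the bytes it is shown, one
 * scatterlist entry at a time. Names are hypothetical.
 */
static int example_count_actor(struct scatterlist *sg, void *data)
{
	size_t *total = data;

	*total += sg->length;
	return 0;
}

static size_t example_count_bytes(struct xdr_buf *buf)
{
	size_t total = 0;

	xdr_process_buf(buf, 0, buf->len, example_count_actor, &total);
	return total;
}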