1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/net/sunrpc/xdr.c
4 *
5 * Generic XDR support.
6 *
7 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
8 */
9
10#include <linux/module.h>
11#include <linux/slab.h>
12#include <linux/types.h>
13#include <linux/string.h>
14#include <linux/kernel.h>
15#include <linux/pagemap.h>
16#include <linux/errno.h>
17#include <linux/sunrpc/xdr.h>
18#include <linux/sunrpc/msg_prot.h>
19#include <linux/bvec.h>
20#include <trace/events/sunrpc.h>
21
22/*
23 * XDR functions for basic NFS types
24 */
25__be32 *
26xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
27{
28 unsigned int quadlen = XDR_QUADLEN(obj->len);
29
30 p[quadlen] = 0; /* zero trailing bytes */
31 *p++ = cpu_to_be32(obj->len);
32 memcpy(p, obj->data, obj->len);
33 return p + XDR_QUADLEN(obj->len);
34}
35EXPORT_SYMBOL_GPL(xdr_encode_netobj);
36
37__be32 *
38xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
39{
40 unsigned int len;
41
42 if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
43 return NULL;
44 obj->len = len;
45 obj->data = (u8 *) p;
46 return p + XDR_QUADLEN(len);
47}
48EXPORT_SYMBOL_GPL(xdr_decode_netobj);
49
50/**
51 * xdr_encode_opaque_fixed - Encode fixed length opaque data
52 * @p: pointer to current position in XDR buffer.
53 * @ptr: pointer to data to encode (or NULL)
54 * @nbytes: size of data.
55 *
56 * Copy the array of data of length nbytes at ptr to the XDR buffer
57 * at position p, then align to the next 32-bit boundary by padding
58 * with zero bytes (see RFC1832).
59 * Note: if ptr is NULL, only the padding is performed.
60 *
61 * Returns the updated current XDR buffer position
62 *
63 */
64__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
65{
66 if (likely(nbytes != 0)) {
67 unsigned int quadlen = XDR_QUADLEN(nbytes);
68 unsigned int padding = (quadlen << 2) - nbytes;
69
70 if (ptr != NULL)
71 memcpy(p, ptr, nbytes);
72 if (padding != 0)
73 memset((char *)p + nbytes, 0, padding);
74 p += quadlen;
75 }
76 return p;
77}
78EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
79
80/**
81 * xdr_encode_opaque - Encode variable length opaque data
82 * @p: pointer to current position in XDR buffer.
83 * @ptr: pointer to data to encode (or NULL)
84 * @nbytes: size of data.
85 *
86 * Returns the updated current XDR buffer position
87 */
88__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
89{
90 *p++ = cpu_to_be32(nbytes);
91 return xdr_encode_opaque_fixed(p, ptr, nbytes);
92}
93EXPORT_SYMBOL_GPL(xdr_encode_opaque);
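/*
 * Illustrative use (hypothetical caller, not part of this file): writing a
 * counted opaque with a raw buffer pointer.  The length word goes out first,
 * then the data, then zero padding up to the next 32-bit boundary, so the
 * returned pointer is always quad-aligned:
 *
 *	static __be32 *encode_cookie(__be32 *p, const u8 *cookie, unsigned int len)
 *	{
 *		return xdr_encode_opaque(p, cookie, len);
 *	}
 *
 * When both peers already know the length (a fixed-size verifier, say),
 * xdr_encode_opaque_fixed() is used instead and the length word is omitted.
 */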
94
95__be32 *
96xdr_encode_string(__be32 *p, const char *string)
97{
98 return xdr_encode_array(p, string, strlen(string));
99}
100EXPORT_SYMBOL_GPL(xdr_encode_string);
101
102__be32 *
103xdr_decode_string_inplace(__be32 *p, char **sp,
104 unsigned int *lenp, unsigned int maxlen)
105{
106 u32 len;
107
108 len = be32_to_cpu(*p++);
109 if (len > maxlen)
110 return NULL;
111 *lenp = len;
112 *sp = (char *) p;
113 return p + XDR_QUADLEN(len);
114}
115EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
116
117/**
118 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
119 * @buf: XDR buffer where string resides
120 * @len: length of string, in bytes
121 *
122 */
123void
124xdr_terminate_string(struct xdr_buf *buf, const u32 len)
125{
126 char *kaddr;
127
128 kaddr = kmap_atomic(buf->pages[0]);
129 kaddr[buf->page_base + len] = '\0';
130 kunmap_atomic(kaddr);
131}
132EXPORT_SYMBOL_GPL(xdr_terminate_string);
133
134size_t
135xdr_buf_pagecount(struct xdr_buf *buf)
136{
137 if (!buf->page_len)
138 return 0;
139 return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
140}
141
142int
143xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp)
144{
145 size_t i, n = xdr_buf_pagecount(buf);
146
147 if (n != 0 && buf->bvec == NULL) {
148 buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
149 if (!buf->bvec)
150 return -ENOMEM;
151 for (i = 0; i < n; i++) {
152 buf->bvec[i].bv_page = buf->pages[i];
153 buf->bvec[i].bv_len = PAGE_SIZE;
154 buf->bvec[i].bv_offset = 0;
155 }
156 }
157 return 0;
158}
159
160void
161xdr_free_bvec(struct xdr_buf *buf)
162{
163 kfree(buf->bvec);
164 buf->bvec = NULL;
165}
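/*
 * Illustrative pairing (hypothetical transport code; 'req' stands in for an
 * rpc_rqst, not part of this file): a stream transport that wants to feed
 * the page portion of a send buffer to an iov_iter builds the bvec array
 * once per request:
 *
 *	if (xdr_alloc_bvec(&req->rq_snd_buf, GFP_KERNEL) < 0)
 *		return -ENOMEM;
 *
 * xdr_free_bvec() releases the array when the request is retired.  Note that
 * each entry's bv_len is simply PAGE_SIZE; the caller bounds the transfer
 * using buf->page_base and buf->page_len.
 */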
166
167/**
168 * xdr_inline_pages - Prepare receive buffer for a large reply
169 * @xdr: xdr_buf into which reply will be placed
170 * @offset: expected offset where data payload will start, in bytes
171 * @pages: vector of struct page pointers
172 * @base: offset in first page where receive should start, in bytes
173 * @len: expected size of the upper layer data payload, in bytes
174 *
175 */
176void
177xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
178 struct page **pages, unsigned int base, unsigned int len)
179{
180 struct kvec *head = xdr->head;
181 struct kvec *tail = xdr->tail;
182 char *buf = (char *)head->iov_base;
183 unsigned int buflen = head->iov_len;
184
185 head->iov_len = offset;
186
187 xdr->pages = pages;
188 xdr->page_base = base;
189 xdr->page_len = len;
190
191 tail->iov_base = buf + offset;
192 tail->iov_len = buflen - offset;
193 if ((xdr->page_len & 3) == 0)
194 tail->iov_len -= sizeof(__be32);
195
196 xdr->buflen += len;
197}
198EXPORT_SYMBOL_GPL(xdr_inline_pages);
199
200/*
201 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
202 */
203
204/**
205 * _shift_data_right_pages
206 * @pages: vector of pages containing both the source and dest memory area.
207 * @pgto_base: page vector address of destination
208 * @pgfrom_base: page vector address of source
209 * @len: number of bytes to copy
210 *
211 * Note: the addresses pgto_base and pgfrom_base are both calculated in
212 * the same way:
213 * if a memory area starts at byte 'base' in page 'pages[i]',
214 * then its address is given as (i << PAGE_SHIFT) + base
215 * Also note: pgfrom_base must be < pgto_base, but the memory areas
216 * they point to may overlap.
217 */
218static void
219_shift_data_right_pages(struct page **pages, size_t pgto_base,
220 size_t pgfrom_base, size_t len)
221{
222 struct page **pgfrom, **pgto;
223 char *vfrom, *vto;
224 size_t copy;
225
226 BUG_ON(pgto_base <= pgfrom_base);
227
228 pgto_base += len;
229 pgfrom_base += len;
230
231 pgto = pages + (pgto_base >> PAGE_SHIFT);
232 pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);
233
234 pgto_base &= ~PAGE_MASK;
235 pgfrom_base &= ~PAGE_MASK;
236
237 do {
238 /* Are any pointers crossing a page boundary? */
239 if (pgto_base == 0) {
240 pgto_base = PAGE_SIZE;
241 pgto--;
242 }
243 if (pgfrom_base == 0) {
244 pgfrom_base = PAGE_SIZE;
245 pgfrom--;
246 }
247
248 copy = len;
249 if (copy > pgto_base)
250 copy = pgto_base;
251 if (copy > pgfrom_base)
252 copy = pgfrom_base;
253 pgto_base -= copy;
254 pgfrom_base -= copy;
255
256 vto = kmap_atomic(*pgto);
257 if (*pgto != *pgfrom) {
258 vfrom = kmap_atomic(*pgfrom);
259 memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
260 kunmap_atomic(vfrom);
261 } else
262 memmove(vto + pgto_base, vto + pgfrom_base, copy);
263 flush_dcache_page(*pgto);
264 kunmap_atomic(vto);
265
266 } while ((len -= copy) != 0);
267}
268
269/**
270 * _copy_to_pages
271 * @pages: array of pages
272 * @pgbase: page vector address of destination
273 * @p: pointer to source data
274 * @len: length
275 *
276 * Copies data from an arbitrary memory location into an array of pages
277 * The copy is assumed to be non-overlapping.
278 */
279static void
280_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
281{
282 struct page **pgto;
283 char *vto;
284 size_t copy;
285
286 pgto = pages + (pgbase >> PAGE_SHIFT);
287 pgbase &= ~PAGE_MASK;
288
289 for (;;) {
290 copy = PAGE_SIZE - pgbase;
291 if (copy > len)
292 copy = len;
293
294 vto = kmap_atomic(*pgto);
295 memcpy(vto + pgbase, p, copy);
296 kunmap_atomic(vto);
297
298 len -= copy;
299 if (len == 0)
300 break;
301
302 pgbase += copy;
303 if (pgbase == PAGE_SIZE) {
304 flush_dcache_page(*pgto);
305 pgbase = 0;
306 pgto++;
307 }
308 p += copy;
309 }
310 flush_dcache_page(*pgto);
311}
312
313/**
314 * _copy_from_pages
315 * @p: pointer to destination
316 * @pages: array of pages
317 * @pgbase: offset of source data
318 * @len: length
319 *
320 * Copies data into an arbitrary memory location from an array of pages
321 * The copy is assumed to be non-overlapping.
322 */
323void
324_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
325{
326 struct page **pgfrom;
327 char *vfrom;
328 size_t copy;
329
330 pgfrom = pages + (pgbase >> PAGE_SHIFT);
331 pgbase &= ~PAGE_MASK;
332
333 do {
334 copy = PAGE_SIZE - pgbase;
335 if (copy > len)
336 copy = len;
337
338 vfrom = kmap_atomic(*pgfrom);
339 memcpy(p, vfrom + pgbase, copy);
340 kunmap_atomic(vfrom);
341
342 pgbase += copy;
343 if (pgbase == PAGE_SIZE) {
344 pgbase = 0;
345 pgfrom++;
346 }
347 p += copy;
348
349 } while ((len -= copy) != 0);
350}
351EXPORT_SYMBOL_GPL(_copy_from_pages);
352
353/**
354 * xdr_shrink_bufhead
355 * @buf: xdr_buf
356 * @len: bytes to remove from buf->head[0]
357 *
358 * Shrinks XDR buffer's header kvec buf->head[0] by
359 * 'len' bytes. The extra data is not lost, but is instead
360 * moved into the inlined pages and/or the tail.
361 */
362static unsigned int
363xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
364{
365 struct kvec *head, *tail;
366 size_t copy, offs;
367 unsigned int pglen = buf->page_len;
368 unsigned int result;
369
370 result = 0;
371 tail = buf->tail;
372 head = buf->head;
373
374 WARN_ON_ONCE(len > head->iov_len);
375 if (len > head->iov_len)
376 len = head->iov_len;
377
378 /* Shift the tail first */
379 if (tail->iov_len != 0) {
380 if (tail->iov_len > len) {
381 copy = tail->iov_len - len;
382 memmove((char *)tail->iov_base + len,
383 tail->iov_base, copy);
384 result += copy;
385 }
386 /* Copy from the inlined pages into the tail */
387 copy = len;
388 if (copy > pglen)
389 copy = pglen;
390 offs = len - copy;
391 if (offs >= tail->iov_len)
392 copy = 0;
393 else if (copy > tail->iov_len - offs)
394 copy = tail->iov_len - offs;
395 if (copy != 0) {
396 _copy_from_pages((char *)tail->iov_base + offs,
397 buf->pages,
398 buf->page_base + pglen + offs - len,
399 copy);
400 result += copy;
401 }
402 /* Do we also need to copy data from the head into the tail? */
403 if (len > pglen) {
404 offs = copy = len - pglen;
405 if (copy > tail->iov_len)
406 copy = tail->iov_len;
407 memcpy(tail->iov_base,
408 (char *)head->iov_base +
409 head->iov_len - offs,
410 copy);
411 result += copy;
412 }
413 }
414 /* Now handle pages */
415 if (pglen != 0) {
416 if (pglen > len)
417 _shift_data_right_pages(buf->pages,
418 buf->page_base + len,
419 buf->page_base,
420 pglen - len);
421 copy = len;
422 if (len > pglen)
423 copy = pglen;
424 _copy_to_pages(buf->pages, buf->page_base,
425 (char *)head->iov_base + head->iov_len - len,
426 copy);
427 result += copy;
428 }
429 head->iov_len -= len;
430 buf->buflen -= len;
431 /* Have we truncated the message? */
432 if (buf->len > buf->buflen)
433 buf->len = buf->buflen;
434
435 return result;
436}
437
438/**
439 * xdr_shrink_pagelen - shrinks buf->pages by up to @len bytes
440 * @buf: xdr_buf
441 * @len: bytes to remove from buf->pages
442 *
443 * The extra data is not lost, but is instead moved into buf->tail.
444 * Returns the actual number of bytes moved.
445 */
446static unsigned int
447xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
448{
449 struct kvec *tail;
450 size_t copy;
451 unsigned int pglen = buf->page_len;
452 unsigned int tailbuf_len;
453 unsigned int result;
454
455 result = 0;
456 tail = buf->tail;
457 if (len > buf->page_len)
458 len = buf->page_len;
459 tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;
460
461 /* Shift the tail first */
462 if (tailbuf_len != 0) {
463 unsigned int free_space = tailbuf_len - tail->iov_len;
464
465 if (len < free_space)
466 free_space = len;
467 tail->iov_len += free_space;
468
469 copy = len;
470 if (tail->iov_len > len) {
471 char *p = (char *)tail->iov_base + len;
472 memmove(p, tail->iov_base, tail->iov_len - len);
473 result += tail->iov_len - len;
474 } else
475 copy = tail->iov_len;
476 /* Copy from the inlined pages into the tail */
477 _copy_from_pages((char *)tail->iov_base,
478 buf->pages, buf->page_base + pglen - len,
479 copy);
480 result += copy;
481 }
482 buf->page_len -= len;
483 buf->buflen -= len;
484 /* Have we truncated the message? */
485 if (buf->len > buf->buflen)
486 buf->len = buf->buflen;
487
488 return result;
489}
490
491void
492xdr_shift_buf(struct xdr_buf *buf, size_t len)
493{
494 xdr_shrink_bufhead(buf, len);
495}
496EXPORT_SYMBOL_GPL(xdr_shift_buf);
497
498/**
499 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
500 * @xdr: pointer to struct xdr_stream
501 */
502unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
503{
504 return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
505}
506EXPORT_SYMBOL_GPL(xdr_stream_pos);
507
508/**
509 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
510 * @xdr: pointer to xdr_stream struct
511 * @buf: pointer to XDR buffer in which to encode data
512 * @p: current pointer inside XDR buffer
513 * @rqst: pointer to controlling rpc_rqst, for debugging
514 *
515 * Note: at the moment the RPC client only passes the length of our
516 * scratch buffer in the xdr_buf's header kvec. Previously this
517 * meant we needed to call xdr_adjust_iovec() after encoding the
518 * data. With the new scheme, the xdr_stream manages the details
519 * of the buffer length, and takes care of adjusting the kvec
520 * length for us.
521 */
522void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
523 struct rpc_rqst *rqst)
524{
525 struct kvec *iov = buf->head;
526 int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;
527
528 xdr_set_scratch_buffer(xdr, NULL, 0);
529 BUG_ON(scratch_len < 0);
530 xdr->buf = buf;
531 xdr->iov = iov;
532 xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
533 xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
534 BUG_ON(iov->iov_len > scratch_len);
535
536 if (p != xdr->p && p != NULL) {
537 size_t len;
538
539 BUG_ON(p < xdr->p || p > xdr->end);
540 len = (char *)p - (char *)xdr->p;
541 xdr->p = p;
542 buf->len += len;
543 iov->iov_len += len;
544 }
545 xdr->rqst = rqst;
546}
547EXPORT_SYMBOL_GPL(xdr_init_encode);
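/*
 * Illustrative sketch (hypothetical encoder; 'req', OP_EXAMPLE and arg_len
 * are made up for the example): a typical client encode path initializes
 * the stream over the request's send buffer, then reserves space as it
 * goes, letting the stream keep buf->len and the head kvec in sync:
 *
 *	struct xdr_stream xdr;
 *	__be32 *p;
 *
 *	xdr_init_encode(&xdr, &req->rq_snd_buf, NULL, req);
 *	p = xdr_reserve_space(&xdr, 2 * sizeof(__be32));
 *	if (!p)
 *		return -EMSGSIZE;
 *	*p++ = cpu_to_be32(OP_EXAMPLE);
 *	*p = cpu_to_be32(arg_len);
 */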
548
549/**
550 * xdr_commit_encode - Ensure all data is written to buffer
551 * @xdr: pointer to xdr_stream
552 *
553 * We handle encoding across page boundaries by giving the caller a
554 * temporary location to write to, then later copying the data into
555 * place; xdr_commit_encode does that copying.
556 *
557 * Normally the caller doesn't need to call this directly, as the
558 * following xdr_reserve_space will do it. But an explicit call may be
559 * required at the end of encoding, or any other time when the xdr_buf
560 * data might be read.
561 */
562inline void xdr_commit_encode(struct xdr_stream *xdr)
563{
564 int shift = xdr->scratch.iov_len;
565 void *page;
566
567 if (shift == 0)
568 return;
569 page = page_address(*xdr->page_ptr);
570 memcpy(xdr->scratch.iov_base, page, shift);
571 memmove(page, page + shift, (void *)xdr->p - page);
572 xdr->scratch.iov_len = 0;
573}
574EXPORT_SYMBOL_GPL(xdr_commit_encode);
575
576static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
577 size_t nbytes)
578{
579 __be32 *p;
580 int space_left;
581 int frag1bytes, frag2bytes;
582
583 if (nbytes > PAGE_SIZE)
584 goto out_overflow; /* Bigger buffers require special handling */
585 if (xdr->buf->len + nbytes > xdr->buf->buflen)
586 goto out_overflow; /* Sorry, we're totally out of space */
587 frag1bytes = (xdr->end - xdr->p) << 2;
588 frag2bytes = nbytes - frag1bytes;
589 if (xdr->iov)
590 xdr->iov->iov_len += frag1bytes;
591 else
592 xdr->buf->page_len += frag1bytes;
593 xdr->page_ptr++;
594 xdr->iov = NULL;
595 /*
596 * If the last encode didn't end exactly on a page boundary, the
597 * next one will straddle boundaries. Encode into the next
598 * page, then copy it back later in xdr_commit_encode. We use
599 * the "scratch" iov to track any temporarily unused fragment of
600 * space at the end of the previous buffer:
601 */
602 xdr->scratch.iov_base = xdr->p;
603 xdr->scratch.iov_len = frag1bytes;
604 p = page_address(*xdr->page_ptr);
605 /*
606 * Note this is where the next encode will start after we've
607 * shifted this one back:
608 */
609 xdr->p = (void *)p + frag2bytes;
610 space_left = xdr->buf->buflen - xdr->buf->len;
611 xdr->end = (void *)p + min_t(int, space_left, PAGE_SIZE);
612 xdr->buf->page_len += frag2bytes;
613 xdr->buf->len += nbytes;
614 return p;
615out_overflow:
616 trace_rpc_xdr_overflow(xdr, nbytes);
617 return NULL;
618}
619
620/**
621 * xdr_reserve_space - Reserve buffer space for sending
622 * @xdr: pointer to xdr_stream
623 * @nbytes: number of bytes to reserve
624 *
625 * Checks that we have enough buffer space to encode 'nbytes' more
626 * bytes of data. If so, update the total xdr_buf length, and
627 * adjust the length of the current kvec.
628 */
629__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
630{
631 __be32 *p = xdr->p;
632 __be32 *q;
633
634 xdr_commit_encode(xdr);
635 /* align nbytes on the next 32-bit boundary */
636 nbytes += 3;
637 nbytes &= ~3;
638 q = p + (nbytes >> 2);
639 if (unlikely(q > xdr->end || q < p))
640 return xdr_get_next_encode_buffer(xdr, nbytes);
641 xdr->p = q;
642 if (xdr->iov)
643 xdr->iov->iov_len += nbytes;
644 else
645 xdr->buf->page_len += nbytes;
646 xdr->buf->len += nbytes;
647 return p;
648}
649EXPORT_SYMBOL_GPL(xdr_reserve_space);
650
651/**
652 * xdr_truncate_encode - truncate an encode buffer
653 * @xdr: pointer to xdr_stream
654 * @len: new length of buffer
655 *
656 * Truncates the xdr stream, so that xdr->buf->len == len,
657 * and xdr->p points at offset len from the start of the buffer, and
658 * head, tail, and page lengths are adjusted to correspond.
659 *
660 * If this means moving xdr->p to a different buffer, we assume that
661 * the end pointer should be set to the end of the current page,
662 * except in the case of the head buffer when we assume the head
663 * buffer's current length represents the end of the available buffer.
664 *
665 * This is *not* safe to use on a buffer that already has inlined page
666 * cache pages (as in a zero-copy server read reply), except for the
667 * simple case of truncating from one position in the tail to another.
668 *
669 */
670void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
671{
672 struct xdr_buf *buf = xdr->buf;
673 struct kvec *head = buf->head;
674 struct kvec *tail = buf->tail;
675 int fraglen;
676 int new;
677
678 if (len > buf->len) {
679 WARN_ON_ONCE(1);
680 return;
681 }
682 xdr_commit_encode(xdr);
683
684 fraglen = min_t(int, buf->len - len, tail->iov_len);
685 tail->iov_len -= fraglen;
686 buf->len -= fraglen;
687 if (tail->iov_len) {
688 xdr->p = tail->iov_base + tail->iov_len;
689 WARN_ON_ONCE(!xdr->end);
690 WARN_ON_ONCE(!xdr->iov);
691 return;
692 }
693 WARN_ON_ONCE(fraglen);
694 fraglen = min_t(int, buf->len - len, buf->page_len);
695 buf->page_len -= fraglen;
696 buf->len -= fraglen;
697
698 new = buf->page_base + buf->page_len;
699
700 xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);
701
702 if (buf->page_len) {
703 xdr->p = page_address(*xdr->page_ptr);
704 xdr->end = (void *)xdr->p + PAGE_SIZE;
705 xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
706 WARN_ON_ONCE(xdr->iov);
707 return;
708 }
709 if (fraglen)
710 xdr->end = head->iov_base + head->iov_len;
711 /* (otherwise assume xdr->end is already set) */
712 xdr->page_ptr--;
713 head->iov_len = len;
714 buf->len = len;
715 xdr->p = head->iov_base + head->iov_len;
716 xdr->iov = buf->head;
717}
718EXPORT_SYMBOL(xdr_truncate_encode);
719
720/**
721 * xdr_restrict_buflen - decrease available buffer space
722 * @xdr: pointer to xdr_stream
723 * @newbuflen: new maximum number of bytes available
724 *
725 * Adjust our idea of how much space is available in the buffer.
726 * If we've already used too much space in the buffer, returns -1.
727 * If the available space is already smaller than newbuflen, returns 0
728 * and does nothing. Otherwise, adjusts xdr->buf->buflen to newbuflen
729 * and ensures xdr->end is set at most offset newbuflen from the start
730 * of the buffer.
731 */
732int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen)
733{
734 struct xdr_buf *buf = xdr->buf;
735 int left_in_this_buf = (void *)xdr->end - (void *)xdr->p;
736 int end_offset = buf->len + left_in_this_buf;
737
738 if (newbuflen < 0 || newbuflen < buf->len)
739 return -1;
740 if (newbuflen > buf->buflen)
741 return 0;
742 if (newbuflen < end_offset)
743 xdr->end = (void *)xdr->end + newbuflen - end_offset;
744 buf->buflen = newbuflen;
745 return 0;
746}
747EXPORT_SYMBOL(xdr_restrict_buflen);
748
749/**
750 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
751 * @xdr: pointer to xdr_stream
752 * @pages: list of pages
753 * @base: offset of first byte
754 * @len: length of data in bytes
755 *
756 */
757void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
758 unsigned int len)
759{
760 struct xdr_buf *buf = xdr->buf;
761 struct kvec *iov = buf->tail;
762 buf->pages = pages;
763 buf->page_base = base;
764 buf->page_len = len;
765
766 iov->iov_base = (char *)xdr->p;
767 iov->iov_len = 0;
768 xdr->iov = iov;
769
770 if (len & 3) {
771 unsigned int pad = 4 - (len & 3);
772
773 BUG_ON(xdr->p >= xdr->end);
774 iov->iov_base = (char *)xdr->p + (len & 3);
775 iov->iov_len += pad;
776 len += pad;
777 *xdr->p++ = 0;
778 }
779 buf->buflen += len;
780 buf->len += len;
781}
782EXPORT_SYMBOL_GPL(xdr_write_pages);
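/*
 * Illustrative sketch (hypothetical caller; 'args' and 'count' stand in for
 * whatever the upper layer tracks, not part of this file): a READ-style
 * encoder hands the caller's own pages to the send buffer instead of
 * copying the payload, and the tail kvec picks up any XDR padding:
 *
 *	xdr_write_pages(&xdr, args->pages, args->pgbase, count);
 *
 * After the call, output from further xdr_reserve_space() lands in the
 * tail kvec rather than the head.
 */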
783
784static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
785 unsigned int len)
786{
787 if (len > iov->iov_len)
788 len = iov->iov_len;
789 xdr->p = (__be32*)iov->iov_base;
790 xdr->end = (__be32*)(iov->iov_base + len);
791 xdr->iov = iov;
792 xdr->page_ptr = NULL;
793}
794
795static int xdr_set_page_base(struct xdr_stream *xdr,
796 unsigned int base, unsigned int len)
797{
798 unsigned int pgnr;
799 unsigned int maxlen;
800 unsigned int pgoff;
801 unsigned int pgend;
802 void *kaddr;
803
804 maxlen = xdr->buf->page_len;
805 if (base >= maxlen)
806 return -EINVAL;
807 maxlen -= base;
808 if (len > maxlen)
809 len = maxlen;
810
811 base += xdr->buf->page_base;
812
813 pgnr = base >> PAGE_SHIFT;
814 xdr->page_ptr = &xdr->buf->pages[pgnr];
815 kaddr = page_address(*xdr->page_ptr);
816
817 pgoff = base & ~PAGE_MASK;
818 xdr->p = (__be32*)(kaddr + pgoff);
819
820 pgend = pgoff + len;
821 if (pgend > PAGE_SIZE)
822 pgend = PAGE_SIZE;
823 xdr->end = (__be32*)(kaddr + pgend);
824 xdr->iov = NULL;
825 return 0;
826}
827
828static void xdr_set_next_page(struct xdr_stream *xdr)
829{
830 unsigned int newbase;
831
832 newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
833 newbase -= xdr->buf->page_base;
834
835 if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
836 xdr_set_iov(xdr, xdr->buf->tail, xdr->nwords << 2);
837}
838
839static bool xdr_set_next_buffer(struct xdr_stream *xdr)
840{
841 if (xdr->page_ptr != NULL)
842 xdr_set_next_page(xdr);
843 else if (xdr->iov == xdr->buf->head) {
844 if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
845 xdr_set_iov(xdr, xdr->buf->tail, xdr->nwords << 2);
846 }
847 return xdr->p != xdr->end;
848}
849
850/**
851 * xdr_init_decode - Initialize an xdr_stream for decoding data.
852 * @xdr: pointer to xdr_stream struct
853 * @buf: pointer to XDR buffer from which to decode data
854 * @p: current pointer inside XDR buffer
855 * @rqst: pointer to controlling rpc_rqst, for debugging
856 */
857void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
858 struct rpc_rqst *rqst)
859{
860 xdr->buf = buf;
861 xdr->scratch.iov_base = NULL;
862 xdr->scratch.iov_len = 0;
863 xdr->nwords = XDR_QUADLEN(buf->len);
864 if (buf->head[0].iov_len != 0)
865 xdr_set_iov(xdr, buf->head, buf->len);
866 else if (buf->page_len != 0)
867 xdr_set_page_base(xdr, 0, buf->len);
868 else
869 xdr_set_iov(xdr, buf->head, buf->len);
870 if (p != NULL && p > xdr->p && xdr->end >= p) {
871 xdr->nwords -= p - xdr->p;
872 xdr->p = p;
873 }
874 xdr->rqst = rqst;
875}
876EXPORT_SYMBOL_GPL(xdr_init_decode);
877
878/**
879 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
880 * @xdr: pointer to xdr_stream struct
881 * @buf: pointer to XDR buffer from which to decode data
882 * @pages: list of pages to decode into
883 * @len: length in bytes of buffer in pages
884 */
885void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
886 struct page **pages, unsigned int len)
887{
888 memset(buf, 0, sizeof(*buf));
889 buf->pages = pages;
890 buf->page_len = len;
891 buf->buflen = len;
892 buf->len = len;
893 xdr_init_decode(xdr, buf, NULL, NULL);
894}
895EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
896
897static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
898{
899 unsigned int nwords = XDR_QUADLEN(nbytes);
900 __be32 *p = xdr->p;
901 __be32 *q = p + nwords;
902
903 if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
904 return NULL;
905 xdr->p = q;
906 xdr->nwords -= nwords;
907 return p;
908}
909
910/**
911 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
912 * @xdr: pointer to xdr_stream struct
913 * @buf: pointer to an empty buffer
914 * @buflen: size of 'buf'
915 *
916 * The scratch buffer is used when decoding from an array of pages.
917 * If an xdr_inline_decode() call spans across page boundaries, then
918 * we copy the data into the scratch buffer in order to allow linear
919 * access.
920 */
921void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
922{
923 xdr->scratch.iov_base = buf;
924 xdr->scratch.iov_len = buflen;
925}
926EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
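/*
 * Illustrative pairing (hypothetical decoder, not part of this file):
 * decoders that pull data out of the page list park a small scratch buffer
 * on the stream up front, so an object straddling a page boundary can
 * still be handed back as one linear chunk:
 *
 *	char scratch[64];	// sized for the largest inline object expected
 *
 *	xdr_set_scratch_buffer(&xdr, scratch, sizeof(scratch));
 *	p = xdr_inline_decode(&xdr, 16);
 *
 * If those 16 bytes happen to span two pages, p points into 'scratch'
 * rather than into the pages themselves.
 */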
927
928static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
929{
930 __be32 *p;
931 char *cpdest = xdr->scratch.iov_base;
932 size_t cplen = (char *)xdr->end - (char *)xdr->p;
933
934 if (nbytes > xdr->scratch.iov_len)
935 goto out_overflow;
936 p = __xdr_inline_decode(xdr, cplen);
937 if (p == NULL)
938 return NULL;
939 memcpy(cpdest, p, cplen);
940 if (!xdr_set_next_buffer(xdr))
941 goto out_overflow;
942 cpdest += cplen;
943 nbytes -= cplen;
944 p = __xdr_inline_decode(xdr, nbytes);
945 if (p == NULL)
946 return NULL;
947 memcpy(cpdest, p, nbytes);
948 return xdr->scratch.iov_base;
949out_overflow:
950 trace_rpc_xdr_overflow(xdr, nbytes);
951 return NULL;
952}
953
954/**
955 * xdr_inline_decode - Retrieve XDR data to decode
956 * @xdr: pointer to xdr_stream struct
957 * @nbytes: number of bytes of data to decode
958 *
959 * Check if the input buffer is long enough to enable us to decode
960 * 'nbytes' more bytes of data starting at the current position.
961 * If so, return the current pointer, then update the current
962 * pointer position.
963 */
964__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
965{
966 __be32 *p;
967
968 if (unlikely(nbytes == 0))
969 return xdr->p;
970 if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
971 goto out_overflow;
972 p = __xdr_inline_decode(xdr, nbytes);
973 if (p != NULL)
974 return p;
975 return xdr_copy_to_scratch(xdr, nbytes);
976out_overflow:
977 trace_rpc_xdr_overflow(xdr, nbytes);
978 return NULL;
979}
980EXPORT_SYMBOL_GPL(xdr_inline_decode);
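/*
 * Illustrative sketch (hypothetical decoder, not part of this file): the
 * usual fixed-size decode is "ask for the bytes, check for NULL, convert":
 *
 *	static int decode_count(struct xdr_stream *xdr, u32 *count)
 *	{
 *		__be32 *p;
 *
 *		p = xdr_inline_decode(xdr, sizeof(*count));
 *		if (unlikely(!p))
 *			return -EIO;	// reply shorter than advertised
 *		*count = be32_to_cpup(p);
 *		return 0;
 *	}
 */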
981
982static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
983{
984 struct xdr_buf *buf = xdr->buf;
985 struct kvec *iov;
986 unsigned int nwords = XDR_QUADLEN(len);
987 unsigned int cur = xdr_stream_pos(xdr);
988 unsigned int copied, offset;
989
990 if (xdr->nwords == 0)
991 return 0;
992
993 /* Realign pages to current pointer position */
994 iov = buf->head;
995 if (iov->iov_len > cur) {
996 offset = iov->iov_len - cur;
997 copied = xdr_shrink_bufhead(buf, offset);
998 trace_rpc_xdr_alignment(xdr, offset, copied);
999 xdr->nwords = XDR_QUADLEN(buf->len - cur);
1000 }
1001
1002 if (nwords > xdr->nwords) {
1003 nwords = xdr->nwords;
1004 len = nwords << 2;
1005 }
1006 if (buf->page_len <= len)
1007 len = buf->page_len;
1008 else if (nwords < xdr->nwords) {
1009 /* Truncate page data and move it into the tail */
1010 offset = buf->page_len - len;
1011 copied = xdr_shrink_pagelen(buf, offset);
1012 trace_rpc_xdr_alignment(xdr, offset, copied);
1013 xdr->nwords = XDR_QUADLEN(buf->len - cur);
1014 }
1015 return len;
1016}
1017
1018/**
1019 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
1020 * @xdr: pointer to xdr_stream struct
1021 * @len: number of bytes of page data
1022 *
1023 * Moves data beyond the current pointer position from the XDR head[] buffer
1024 * into the page list. Any data that lies beyond current position + "len"
1025 * bytes is moved into the XDR tail[].
1026 *
1027 * Returns the number of XDR encoded bytes now contained in the pages
1028 */
1029unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
1030{
1031 struct xdr_buf *buf = xdr->buf;
1032 struct kvec *iov;
1033 unsigned int nwords;
1034 unsigned int end;
1035 unsigned int padding;
1036
1037 len = xdr_align_pages(xdr, len);
1038 if (len == 0)
1039 return 0;
1040 nwords = XDR_QUADLEN(len);
1041 padding = (nwords << 2) - len;
1042 xdr->iov = iov = buf->tail;
1043 /* Compute remaining message length. */
1044 end = ((xdr->nwords - nwords) << 2) + padding;
1045 if (end > iov->iov_len)
1046 end = iov->iov_len;
1047
1048 /*
1049 * Position current pointer at beginning of tail, and
1050 * set remaining message length.
1051 */
1052 xdr->p = (__be32 *)((char *)iov->iov_base + padding);
1053 xdr->end = (__be32 *)((char *)iov->iov_base + end);
1054 xdr->page_ptr = NULL;
1055 xdr->nwords = XDR_QUADLEN(end - padding);
1056 return len;
1057}
1058EXPORT_SYMBOL_GPL(xdr_read_pages);
1059
1060/**
1061 * xdr_enter_page - decode data from the XDR page
1062 * @xdr: pointer to xdr_stream struct
1063 * @len: number of bytes of page data
1064 *
1065 * Moves data beyond the current pointer position from the XDR head[] buffer
1066 * into the page list. Any data that lies beyond current position + "len"
1067 * bytes is moved into the XDR tail[]. The current pointer is then
1068 * repositioned at the beginning of the first XDR page.
1069 */
1070void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
1071{
1072 len = xdr_align_pages(xdr, len);
1073 /*
1074 * Position current pointer at beginning of tail, and
1075 * set remaining message length.
1076 */
1077 if (len != 0)
1078 xdr_set_page_base(xdr, 0, len);
1079}
1080EXPORT_SYMBOL_GPL(xdr_enter_page);
1081
1082static const struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
1083
1084void
1085xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
1086{
1087 buf->head[0] = *iov;
1088 buf->tail[0] = empty_iov;
1089 buf->page_len = 0;
1090 buf->buflen = buf->len = iov->iov_len;
1091}
1092EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
1093
1094/**
1095 * xdr_buf_subsegment - set subbuf to a portion of buf
1096 * @buf: an xdr buffer
1097 * @subbuf: the result buffer
1098 * @base: beginning of range in bytes
1099 * @len: length of range in bytes
1100 *
1101 * sets @subbuf to an xdr buffer representing the portion of @buf of
1102 * length @len starting at offset @base.
1103 *
1104 * @buf and @subbuf may be pointers to the same struct xdr_buf.
1105 *
1106 * Returns -1 if base or length are out of bounds.
1107 */
1108int
1109xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
1110 unsigned int base, unsigned int len)
1111{
1112 subbuf->buflen = subbuf->len = len;
1113 if (base < buf->head[0].iov_len) {
1114 subbuf->head[0].iov_base = buf->head[0].iov_base + base;
1115 subbuf->head[0].iov_len = min_t(unsigned int, len,
1116 buf->head[0].iov_len - base);
1117 len -= subbuf->head[0].iov_len;
1118 base = 0;
1119 } else {
1120 base -= buf->head[0].iov_len;
1121 subbuf->head[0].iov_base = buf->head[0].iov_base;
1122 subbuf->head[0].iov_len = 0;
1123 }
1124
1125 if (base < buf->page_len) {
1126 subbuf->page_len = min(buf->page_len - base, len);
1127 base += buf->page_base;
1128 subbuf->page_base = base & ~PAGE_MASK;
1129 subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
1130 len -= subbuf->page_len;
1131 base = 0;
1132 } else {
1133 base -= buf->page_len;
1134 subbuf->pages = buf->pages;
1135 subbuf->page_base = 0;
1136 subbuf->page_len = 0;
1137 }
1138
1139 if (base < buf->tail[0].iov_len) {
1140 subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
1141 subbuf->tail[0].iov_len = min_t(unsigned int, len,
1142 buf->tail[0].iov_len - base);
1143 len -= subbuf->tail[0].iov_len;
1144 base = 0;
1145 } else {
1146 base -= buf->tail[0].iov_len;
1147 subbuf->tail[0].iov_base = buf->tail[0].iov_base;
1148 subbuf->tail[0].iov_len = 0;
1149 }
1150
1151 if (base || len)
1152 return -1;
1153 return 0;
1154}
1155EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
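/*
 * Illustrative sketch (hypothetical caller; 'offset' and 'payload_len' are
 * caller-computed values): carving out the region covering a payload so it
 * can be examined or integrity-checked without copying:
 *
 *	struct xdr_buf databody;
 *
 *	if (xdr_buf_subsegment(buf, &databody, offset, payload_len) < 0)
 *		return -EINVAL;
 *
 * databody shares head/pages/tail memory with buf; only the iov_len,
 * page_base and page_len bookkeeping differ.
 */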
1156
1157/**
1158 * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
1159 * @buf: buf to be trimmed
1160 * @len: number of bytes to reduce "buf" by
1161 *
1162 * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
1163 * that it's possible that we'll trim less than that amount if the xdr_buf is
1164 * too small, or if (for instance) it's all in the head and the parser has
1165 * already read too far into it.
1166 */
1167void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
1168{
1169 size_t cur;
1170 unsigned int trim = len;
1171
1172 if (buf->tail[0].iov_len) {
1173 cur = min_t(size_t, buf->tail[0].iov_len, trim);
1174 buf->tail[0].iov_len -= cur;
1175 trim -= cur;
1176 if (!trim)
1177 goto fix_len;
1178 }
1179
1180 if (buf->page_len) {
1181 cur = min_t(unsigned int, buf->page_len, trim);
1182 buf->page_len -= cur;
1183 trim -= cur;
1184 if (!trim)
1185 goto fix_len;
1186 }
1187
1188 if (buf->head[0].iov_len) {
1189 cur = min_t(size_t, buf->head[0].iov_len, trim);
1190 buf->head[0].iov_len -= cur;
1191 trim -= cur;
1192 }
1193fix_len:
1194 buf->len -= (len - trim);
1195}
1196EXPORT_SYMBOL_GPL(xdr_buf_trim);
1197
1198static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
1199{
1200 unsigned int this_len;
1201
1202 this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
1203 memcpy(obj, subbuf->head[0].iov_base, this_len);
1204 len -= this_len;
1205 obj += this_len;
1206 this_len = min_t(unsigned int, len, subbuf->page_len);
1207 if (this_len)
1208 _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
1209 len -= this_len;
1210 obj += this_len;
1211 this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
1212 memcpy(obj, subbuf->tail[0].iov_base, this_len);
1213}
1214
1215/* obj is assumed to point to allocated memory of size at least len: */
1216int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
1217{
1218 struct xdr_buf subbuf;
1219 int status;
1220
1221 status = xdr_buf_subsegment(buf, &subbuf, base, len);
1222 if (status != 0)
1223 return status;
1224 __read_bytes_from_xdr_buf(&subbuf, obj, len);
1225 return 0;
1226}
1227EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
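/*
 * Illustrative sketch (hypothetical caller; 'offset' is a caller-computed
 * byte position): copying a small fixed-size object out of the buffer
 * regardless of whether it landed in the head, the pages or the tail:
 *
 *	u8 verf[8];
 *
 *	if (read_bytes_from_xdr_buf(buf, offset, verf, sizeof(verf)) < 0)
 *		return -EIO;
 */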
1228
1229static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
1230{
1231 unsigned int this_len;
1232
1233 this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
1234 memcpy(subbuf->head[0].iov_base, obj, this_len);
1235 len -= this_len;
1236 obj += this_len;
1237 this_len = min_t(unsigned int, len, subbuf->page_len);
1238 if (this_len)
1239 _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
1240 len -= this_len;
1241 obj += this_len;
1242 this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
1243 memcpy(subbuf->tail[0].iov_base, obj, this_len);
1244}
1245
1246/* obj is assumed to point to allocated memory of size at least len: */
1247int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
1248{
1249 struct xdr_buf subbuf;
1250 int status;
1251
1252 status = xdr_buf_subsegment(buf, &subbuf, base, len);
1253 if (status != 0)
1254 return status;
1255 __write_bytes_to_xdr_buf(&subbuf, obj, len);
1256 return 0;
1257}
1258EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
1259
1260int
1261xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
1262{
1263 __be32 raw;
1264 int status;
1265
1266 status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
1267 if (status)
1268 return status;
1269 *obj = be32_to_cpu(raw);
1270 return 0;
1271}
1272EXPORT_SYMBOL_GPL(xdr_decode_word);
1273
1274int
1275xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
1276{
1277 __be32 raw = cpu_to_be32(obj);
1278
1279 return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
1280}
1281EXPORT_SYMBOL_GPL(xdr_encode_word);
1282
1283/* Returns 0 on success, or else a negative error code. */
1284static int
1285xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
1286 struct xdr_array2_desc *desc, int encode)
1287{
1288 char *elem = NULL, *c;
1289 unsigned int copied = 0, todo, avail_here;
1290 struct page **ppages = NULL;
1291 int err;
1292
1293 if (encode) {
1294 if (xdr_encode_word(buf, base, desc->array_len) != 0)
1295 return -EINVAL;
1296 } else {
1297 if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
1298 desc->array_len > desc->array_maxlen ||
1299 (unsigned long) base + 4 + desc->array_len *
1300 desc->elem_size > buf->len)
1301 return -EINVAL;
1302 }
1303 base += 4;
1304
1305 if (!desc->xcode)
1306 return 0;
1307
1308 todo = desc->array_len * desc->elem_size;
1309
1310 /* process head */
1311 if (todo && base < buf->head->iov_len) {
1312 c = buf->head->iov_base + base;
1313 avail_here = min_t(unsigned int, todo,
1314 buf->head->iov_len - base);
1315 todo -= avail_here;
1316
1317 while (avail_here >= desc->elem_size) {
1318 err = desc->xcode(desc, c);
1319 if (err)
1320 goto out;
1321 c += desc->elem_size;
1322 avail_here -= desc->elem_size;
1323 }
1324 if (avail_here) {
1325 if (!elem) {
1326 elem = kmalloc(desc->elem_size, GFP_KERNEL);
1327 err = -ENOMEM;
1328 if (!elem)
1329 goto out;
1330 }
1331 if (encode) {
1332 err = desc->xcode(desc, elem);
1333 if (err)
1334 goto out;
1335 memcpy(c, elem, avail_here);
1336 } else
1337 memcpy(elem, c, avail_here);
1338 copied = avail_here;
1339 }
1340 base = buf->head->iov_len; /* align to start of pages */
1341 }
1342
1343 /* process pages array */
1344 base -= buf->head->iov_len;
1345 if (todo && base < buf->page_len) {
1346 unsigned int avail_page;
1347
1348 avail_here = min(todo, buf->page_len - base);
1349 todo -= avail_here;
1350
1351 base += buf->page_base;
1352 ppages = buf->pages + (base >> PAGE_SHIFT);
1353 base &= ~PAGE_MASK;
1354 avail_page = min_t(unsigned int, PAGE_SIZE - base,
1355 avail_here);
1356 c = kmap(*ppages) + base;
1357
1358 while (avail_here) {
1359 avail_here -= avail_page;
1360 if (copied || avail_page < desc->elem_size) {
1361 unsigned int l = min(avail_page,
1362 desc->elem_size - copied);
1363 if (!elem) {
1364 elem = kmalloc(desc->elem_size,
1365 GFP_KERNEL);
1366 err = -ENOMEM;
1367 if (!elem)
1368 goto out;
1369 }
1370 if (encode) {
1371 if (!copied) {
1372 err = desc->xcode(desc, elem);
1373 if (err)
1374 goto out;
1375 }
1376 memcpy(c, elem + copied, l);
1377 copied += l;
1378 if (copied == desc->elem_size)
1379 copied = 0;
1380 } else {
1381 memcpy(elem + copied, c, l);
1382 copied += l;
1383 if (copied == desc->elem_size) {
1384 err = desc->xcode(desc, elem);
1385 if (err)
1386 goto out;
1387 copied = 0;
1388 }
1389 }
1390 avail_page -= l;
1391 c += l;
1392 }
1393 while (avail_page >= desc->elem_size) {
1394 err = desc->xcode(desc, c);
1395 if (err)
1396 goto out;
1397 c += desc->elem_size;
1398 avail_page -= desc->elem_size;
1399 }
1400 if (avail_page) {
1401 unsigned int l = min(avail_page,
1402 desc->elem_size - copied);
1403 if (!elem) {
1404 elem = kmalloc(desc->elem_size,
1405 GFP_KERNEL);
1406 err = -ENOMEM;
1407 if (!elem)
1408 goto out;
1409 }
1410 if (encode) {
1411 if (!copied) {
1412 err = desc->xcode(desc, elem);
1413 if (err)
1414 goto out;
1415 }
1416 memcpy(c, elem + copied, l);
1417 copied += l;
1418 if (copied == desc->elem_size)
1419 copied = 0;
1420 } else {
1421 memcpy(elem + copied, c, l);
1422 copied += l;
1423 if (copied == desc->elem_size) {
1424 err = desc->xcode(desc, elem);
1425 if (err)
1426 goto out;
1427 copied = 0;
1428 }
1429 }
1430 }
1431 if (avail_here) {
1432 kunmap(*ppages);
1433 ppages++;
1434 c = kmap(*ppages);
1435 }
1436
1437 avail_page = min(avail_here,
1438 (unsigned int) PAGE_SIZE);
1439 }
1440 base = buf->page_len; /* align to start of tail */
1441 }
1442
1443 /* process tail */
1444 base -= buf->page_len;
1445 if (todo) {
1446 c = buf->tail->iov_base + base;
1447 if (copied) {
1448 unsigned int l = desc->elem_size - copied;
1449
1450 if (encode)
1451 memcpy(c, elem + copied, l);
1452 else {
1453 memcpy(elem + copied, c, l);
1454 err = desc->xcode(desc, elem);
1455 if (err)
1456 goto out;
1457 }
1458 todo -= l;
1459 c += l;
1460 }
1461 while (todo) {
1462 err = desc->xcode(desc, c);
1463 if (err)
1464 goto out;
1465 c += desc->elem_size;
1466 todo -= desc->elem_size;
1467 }
1468 }
1469 err = 0;
1470
1471out:
1472 kfree(elem);
1473 if (ppages)
1474 kunmap(*ppages);
1475 return err;
1476}
1477
1478int
1479xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
1480 struct xdr_array2_desc *desc)
1481{
1482 if (base >= buf->len)
1483 return -EINVAL;
1484
1485 return xdr_xcode_array2(buf, base, desc, 0);
1486}
1487EXPORT_SYMBOL_GPL(xdr_decode_array2);
1488
1489int
1490xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
1491 struct xdr_array2_desc *desc)
1492{
1493 if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
1494 buf->head->iov_len + buf->page_len + buf->tail->iov_len)
1495 return -EINVAL;
1496
1497 return xdr_xcode_array2(buf, base, desc, 1);
1498}
1499EXPORT_SYMBOL_GPL(xdr_encode_array2);
1500
1501int
1502xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
1503 int (*actor)(struct scatterlist *, void *), void *data)
1504{
1505 int i, ret = 0;
1506 unsigned int page_len, thislen, page_offset;
1507 struct scatterlist sg[1];
1508
1509 sg_init_table(sg, 1);
1510
1511 if (offset >= buf->head[0].iov_len) {
1512 offset -= buf->head[0].iov_len;
1513 } else {
1514 thislen = buf->head[0].iov_len - offset;
1515 if (thislen > len)
1516 thislen = len;
1517 sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
1518 ret = actor(sg, data);
1519 if (ret)
1520 goto out;
1521 offset = 0;
1522 len -= thislen;
1523 }
1524 if (len == 0)
1525 goto out;
1526
1527 if (offset >= buf->page_len) {
1528 offset -= buf->page_len;
1529 } else {
1530 page_len = buf->page_len - offset;
1531 if (page_len > len)
1532 page_len = len;
1533 len -= page_len;
1534 page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
1535 i = (offset + buf->page_base) >> PAGE_SHIFT;
1536 thislen = PAGE_SIZE - page_offset;
1537 do {
1538 if (thislen > page_len)
1539 thislen = page_len;
1540 sg_set_page(sg, buf->pages[i], thislen, page_offset);
1541 ret = actor(sg, data);
1542 if (ret)
1543 goto out;
1544 page_len -= thislen;
1545 i++;
1546 page_offset = 0;
1547 thislen = PAGE_SIZE;
1548 } while (page_len != 0);
1549 offset = 0;
1550 }
1551 if (len == 0)
1552 goto out;
1553 if (offset < buf->tail[0].iov_len) {
1554 thislen = buf->tail[0].iov_len - offset;
1555 if (thislen > len)
1556 thislen = len;
1557 sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
1558 ret = actor(sg, data);
1559 len -= thislen;
1560 }
1561 if (len != 0)
1562 ret = -EINVAL;
1563out:
1564 return ret;
1565}
1566EXPORT_SYMBOL_GPL(xdr_process_buf);
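/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * actor runs once per contiguous region, which lets a caller fold an
 * xdr_buf into a digest without flattening it first:
 *
 *	static int sum_region(struct scatterlist *sg, void *data)
 *	{
 *		u32 *sum = data;
 *
 *		// fold the region described by sg into *sum (details omitted)
 *		return 0;
 *	}
 *
 *	u32 sum = 0;
 *
 *	if (xdr_process_buf(buf, 0, buf->len, sum_region, &sum) < 0)
 *		return -EINVAL;
 */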
1567
1568/**
1569 * xdr_stream_decode_opaque - Decode variable length opaque
1570 * @xdr: pointer to xdr_stream
1571 * @ptr: location to store opaque data
1572 * @size: size of storage buffer @ptr
1573 *
1574 * Return values:
1575 * On success, returns size of object stored in *@ptr
1576 * %-EBADMSG on XDR buffer overflow
1577 * %-EMSGSIZE on overflow of storage buffer @ptr
1578 */
1579ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, size_t size)
1580{
1581 ssize_t ret;
1582 void *p;
1583
1584 ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
1585 if (ret <= 0)
1586 return ret;
1587 memcpy(ptr, p, ret);
1588 return ret;
1589}
1590EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque);
1591
1592/**
1593 * xdr_stream_decode_opaque_dup - Decode and duplicate variable length opaque
1594 * @xdr: pointer to xdr_stream
1595 * @ptr: location to store pointer to opaque data
1596 * @maxlen: maximum acceptable object size
1597 * @gfp_flags: GFP mask to use
1598 *
1599 * Return values:
1600 * On success, returns size of object stored in *@ptr
1601 * %-EBADMSG on XDR buffer overflow
1602 * %-EMSGSIZE if the size of the object would exceed @maxlen
1603 * %-ENOMEM on memory allocation failure
1604 */
1605ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr,
1606 size_t maxlen, gfp_t gfp_flags)
1607{
1608 ssize_t ret;
1609 void *p;
1610
1611 ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
1612 if (ret > 0) {
1613 *ptr = kmemdup(p, ret, gfp_flags);
1614 if (*ptr != NULL)
1615 return ret;
1616 ret = -ENOMEM;
1617 }
1618 *ptr = NULL;
1619 return ret;
1620}
1621EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_dup);
1622
1623/**
1624 * xdr_stream_decode_string - Decode variable length string
1625 * @xdr: pointer to xdr_stream
1626 * @str: location to store string
1627 * @size: size of storage buffer @str
1628 *
1629 * Return values:
1630 * On success, returns length of NUL-terminated string stored in *@str
1631 * %-EBADMSG on XDR buffer overflow
1632 * %-EMSGSIZE on overflow of storage buffer @str
1633 */
1634ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, size_t size)
1635{
1636 ssize_t ret;
1637 void *p;
1638
1639 ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
1640 if (ret > 0) {
1641 memcpy(str, p, ret);
1642 str[ret] = '\0';
1643 return strlen(str);
1644 }
1645 *str = '\0';
1646 return ret;
1647}
1648EXPORT_SYMBOL_GPL(xdr_stream_decode_string);
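/*
 * Illustrative sketch (hypothetical decoder, not part of this file):
 *
 *	char name[128 + 1];	// example bound; leaves room for the added '\0'
 *	ssize_t ret;
 *
 *	ret = xdr_stream_decode_string(&xdr, name, 128);
 *	if (ret < 0)
 *		return ret;	// -EBADMSG or -EMSGSIZE, as documented above
 *
 * On success 'ret' is the string length and 'name' is NUL-terminated.
 */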
1649
1650/**
1651 * xdr_stream_decode_string_dup - Decode and duplicate variable length string
1652 * @xdr: pointer to xdr_stream
1653 * @str: location to store pointer to string
1654 * @maxlen: maximum acceptable string length
1655 * @gfp_flags: GFP mask to use
1656 *
1657 * Return values:
1658 * On success, returns length of NUL-terminated string stored in *@str
1659 * %-EBADMSG on XDR buffer overflow
1660 * %-EMSGSIZE if the size of the string would exceed @maxlen
1661 * %-ENOMEM on memory allocation failure
1662 */
1663ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
1664 size_t maxlen, gfp_t gfp_flags)
1665{
1666 void *p;
1667 ssize_t ret;
1668
1669 ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
1670 if (ret > 0) {
1671 char *s = kmalloc(ret + 1, gfp_flags);
1672 if (s != NULL) {
1673 memcpy(s, p, ret);
1674 s[ret] = '\0';
1675 *str = s;
1676 return strlen(s);
1677 }
1678 ret = -ENOMEM;
1679 }
1680 *str = NULL;
1681 return ret;
1682}
1683EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup);
504{
505 __be32 *p = xdr->p;
506 __be32 *q;
507
508 /* align nbytes on the next 32-bit boundary */
509 nbytes += 3;
510 nbytes &= ~3;
511 q = p + (nbytes >> 2);
512 if (unlikely(q > xdr->end || q < p))
513 return NULL;
514 xdr->p = q;
515 xdr->iov->iov_len += nbytes;
516 xdr->buf->len += nbytes;
517 return p;
518}
519EXPORT_SYMBOL_GPL(xdr_reserve_space);
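
/*
 * Usage sketch (illustrative only; "op" and "count" are hypothetical
 * values, not taken from a real caller): reserve room for two words,
 * then encode into the returned pointer:
 *
 *	__be32 *p = xdr_reserve_space(xdr, 2 * sizeof(__be32));
 *	if (p == NULL)
 *		return -EMSGSIZE;
 *	*p++ = cpu_to_be32(op);
 *	*p = cpu_to_be32(count);
 */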

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);
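
/*
 * Sender-side sketch (hypothetical names; a real caller supplies its
 * own page vector and byte count): encode the fixed part into the
 * head, then hang the payload pages off the buffer so the tail can
 * carry any XDR padding:
 *
 *	__be32 *p = xdr_reserve_space(xdr, sizeof(__be32));
 *	if (p == NULL)
 *		return -EMSGSIZE;
 *	*p = cpu_to_be32(count);
 *	xdr_write_pages(xdr, args->pages, args->pgbase, count);
 */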

static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
			__be32 *p, unsigned int len)
{
	if (len > iov->iov_len)
		len = iov->iov_len;
	if (p == NULL)
		p = (__be32 *)iov->iov_base;
	xdr->p = p;
	xdr->end = (__be32 *)(iov->iov_base + len);
	xdr->iov = iov;
	xdr->page_ptr = NULL;
}

static int xdr_set_page_base(struct xdr_stream *xdr,
			     unsigned int base, unsigned int len)
{
	unsigned int pgnr;
	unsigned int maxlen;
	unsigned int pgoff;
	unsigned int pgend;
	void *kaddr;

	maxlen = xdr->buf->page_len;
	if (base >= maxlen)
		return -EINVAL;
	maxlen -= base;
	if (len > maxlen)
		len = maxlen;

	base += xdr->buf->page_base;

	pgnr = base >> PAGE_SHIFT;
	xdr->page_ptr = &xdr->buf->pages[pgnr];
	kaddr = page_address(*xdr->page_ptr);

	pgoff = base & ~PAGE_MASK;
	xdr->p = (__be32 *)(kaddr + pgoff);

	pgend = pgoff + len;
	if (pgend > PAGE_SIZE)
		pgend = PAGE_SIZE;
	xdr->end = (__be32 *)(kaddr + pgend);
	xdr->iov = NULL;
	return 0;
}

static void xdr_set_next_page(struct xdr_stream *xdr)
{
	unsigned int newbase;

	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
	newbase -= xdr->buf->page_base;

	if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
		xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
}

static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
	if (xdr->page_ptr != NULL)
		xdr_set_next_page(xdr);
	else if (xdr->iov == xdr->buf->head) {
		if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
			xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
	}
	return xdr->p != xdr->end;
}

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	xdr->buf = buf;
	xdr->scratch.iov_base = NULL;
	xdr->scratch.iov_len = 0;
	if (buf->head[0].iov_len != 0)
		xdr_set_iov(xdr, buf->head, p, buf->len);
	else if (buf->page_len != 0)
		xdr_set_page_base(xdr, 0, buf->len);
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
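
/*
 * Minimal decode setup (a sketch; "rcvbuf" is assumed to be a fully
 * received reply and "p" the current decode position within its head):
 *
 *	struct xdr_stream xdr;
 *
 *	xdr_init_decode(&xdr, rcvbuf, p);
 *	p = xdr_inline_decode(&xdr, sizeof(__be32));
 *	if (p == NULL)
 *		return -EIO;
 *	status = be32_to_cpup(p);
 */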

/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @pages: list of pages to decode into
 * @len: length in bytes of buffer in pages
 */
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
			   struct page **pages, unsigned int len)
{
	memset(buf, 0, sizeof(*buf));
	buf->pages = pages;
	buf->page_len = len;
	buf->buflen = len;
	buf->len = len;
	xdr_init_decode(xdr, buf, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
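
/*
 * Sketch for page-only buffers (names are illustrative): when a
 * message was received straight into pages with no head kvec, zero
 * the xdr_buf and decode directly from the page array:
 *
 *	struct xdr_buf buf;
 *	struct xdr_stream xdr;
 *
 *	xdr_init_decode_pages(&xdr, &buf, pages, length);
 */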

static __be32 *__xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q = p + XDR_QUADLEN(nbytes);

	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	return p;
}

/**
 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to an empty buffer
 * @buflen: size of 'buf'
 *
 * The scratch buffer is used when decoding from an array of pages.
 * If an xdr_inline_decode() call spans across page boundaries, then
 * we copy the data into the scratch buffer in order to allow linear
 * access.
 */
void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
{
	xdr->scratch.iov_base = buf;
	xdr->scratch.iov_len = buflen;
}
EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
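
/*
 * Typical pairing with xdr_inline_decode() (a sketch; the 64-byte
 * scratch size is an arbitrary example): objects that straddle a page
 * boundary are copied into the scratch buffer, so the pointer returned
 * by xdr_inline_decode() may point at "scratch" rather than the pages:
 *
 *	char scratch[64];
 *
 *	xdr_set_scratch_buffer(xdr, scratch, sizeof(scratch));
 */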

static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;
	void *cpdest = xdr->scratch.iov_base;
	size_t cplen = (char *)xdr->end - (char *)xdr->p;

	if (nbytes > xdr->scratch.iov_len)
		return NULL;
	memcpy(cpdest, xdr->p, cplen);
	cpdest += cplen;
	nbytes -= cplen;
	if (!xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, nbytes);
	return xdr->scratch.iov_base;
}

/**
 * xdr_inline_decode - Retrieve XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;

	if (nbytes == 0)
		return xdr->p;
	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p != NULL)
		return p;
	return xdr_copy_to_scratch(xdr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
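
/*
 * Decode-side sketch (the object layout is hypothetical): read a
 * 4-byte length and then the opaque body; either call returns NULL
 * if the message is too short:
 *
 *	p = xdr_inline_decode(xdr, sizeof(__be32));
 *	if (p == NULL)
 *		return -EIO;
 *	len = be32_to_cpup(p);
 *	p = xdr_inline_decode(xdr, len);
 *	if (p == NULL)
 *		return -EIO;
 */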

/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	ssize_t shift;
	unsigned int end;
	int padding;

	/* Realign pages to current pointer position */
	iov = buf->head;
	shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
	if (shift > 0)
		xdr_shrink_bufhead(buf, shift);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length. */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
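
/*
 * Sketch of decoding a reply whose bulk payload lives in pages
 * (illustrative; "count" would come from the decoded header): align
 * the page data at the current position, then keep decoding the
 * trailing fields, which now start in the tail:
 *
 *	xdr_read_pages(xdr, count);
 *	p = xdr_inline_decode(xdr, sizeof(__be32));
 *	if (p == NULL)
 *		return -EIO;
 *	eof = be32_to_cpup(p);
 */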

/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	xdr_read_pages(xdr, len);
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);

/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length is out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
		   unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		subbuf->head[0].iov_base = NULL;
		subbuf->head[0].iov_len = 0;
		base -= buf->head[0].iov_len;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_CACHE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		subbuf->tail[0].iov_base = NULL;
		subbuf->tail[0].iov_len = 0;
		base -= buf->tail[0].iov_len;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
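
/*
 * Sketch (the offsets are arbitrary examples): carve a 16-byte window
 * starting 4 bytes into the message, e.g. to hand one field to a
 * consumer that expects its own xdr_buf:
 *
 *	struct xdr_buf sub;
 *
 *	if (xdr_buf_subsegment(buf, &sub, 4, 16) != 0)
 *		return -EINVAL;
 */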

static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
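
/*
 * Sketch (offset and object are examples only): linearize a small
 * object that may span the head, the pages and the tail:
 *
 *	__be32 cookie[2];
 *
 *	if (read_bytes_from_xdr_buf(buf, offset, cookie, sizeof(cookie)))
 *		return -EFAULT;
 */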

static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);

int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32 raw;
	int status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = be32_to_cpu(raw);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32 raw = cpu_to_be32(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);
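
/*
 * Round-trip sketch (the offset is arbitrary and FLAG_EXAMPLE is a
 * hypothetical constant): the word is stored big-endian in the buffer
 * while the caller works in host order:
 *
 *	u32 val;
 *
 *	if (xdr_decode_word(buf, offset, &val))
 *		return -EFAULT;
 *	val |= FLAG_EXAMPLE;
 *	if (xdr_encode_word(buf, offset, val))
 *		return -EFAULT;
 */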

/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set obj to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * tail.) */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);
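
/*
 * Sketch (the offset is illustrative): pull a counted opaque out of
 * the buffer; on success obj.data either aliases the head or tail, or
 * points at a copy placed at the end of the tail:
 *
 *	struct xdr_netobj obj;
 *
 *	if (xdr_buf_read_netobj(buf, &obj, offset))
 *		return -EFAULT;
 */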

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
				 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);
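
/*
 * Descriptor sketch (decode_elem() and the sizes are hypothetical):
 * the xcode callback runs once per fixed-size element, with elements
 * that straddle buffer segments bounced through a temporary:
 *
 *	struct xdr_array2_desc desc = {
 *		.elem_size	= 8,
 *		.array_maxlen	= 64,
 *		.xcode		= decode_elem,
 *	};
 *
 *	err = xdr_decode_array2(buf, base, &desc);
 */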

int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
		thislen = PAGE_CACHE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_CACHE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);

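/*
 * Actor sketch (checksumming is only an example use; csum_actor() is
 * hypothetical): the actor is invoked once per contiguous region of
 * the buffer, so it must cope with arbitrary fragment sizes:
 *
 *	static int csum_actor(struct scatterlist *sg, void *data)
 *	{
 *		return crypto_hash_update(data, sg, sg->length);
 *	}
 *
 *	err = xdr_process_buf(buf, 0, buf->len, csum_actor, &desc);
 */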