/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = cpu_to_be32(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);
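
/*
 * Usage sketch (not part of the original file): the netobj helpers do no
 * bounds checking beyond XDR_MAX_NETOBJ, so the caller must know the
 * buffer is large enough. A round trip looks like ('clid' is illustrative):
 *
 *	p = xdr_encode_netobj(p, &clid);
 *	...
 *	p = xdr_decode_netobj(p, &clid2);
 *	if (p == NULL)
 *		return -EIO;	(object exceeded XDR_MAX_NETOBJ)
 */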

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 *
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = cpu_to_be32(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);
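
/*
 * Usage sketch (illustrative, not from this file): encoding an 8-byte
 * verifier as a counted opaque. The buffer is sized for the length word
 * plus the rounded-up data:
 *
 *	__be32 buf[1 + XDR_QUADLEN(8)];
 *	u8 verf[8] = { 0 };
 *	__be32 *p = buf;
 *
 *	p = xdr_encode_opaque(p, verf, sizeof(verf));
 *
 * On return p points past the length word, the data and any zero padding.
 */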

__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);

__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len;

	len = be32_to_cpu(*p++);
	if (len > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);

/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 *
 */
void
xdr_terminate_string(struct xdr_buf *buf, const u32 len)
{
	char *kaddr;

	kaddr = kmap_atomic(buf->pages[0]);
	kaddr[buf->page_base + len] = '\0';
	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);
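
/*
 * Usage sketch (illustrative only; the rq_rcv_buf/replen names below are
 * assumptions modelled on typical callers, not definitions from this
 * file): a client expecting a large payload splices pages into its
 * receive buffer before the call is sent:
 *
 *	xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
 *			 args->pages, args->pgbase, args->count);
 *
 * The first 'replen' XDR words are then received into head[0], the next
 * 'count' bytes into the page vector, and any trailing data into tail[0].
 */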

/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 */

/**
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);

	pgto_base &= ~PAGE_MASK;
	pgfrom_base &= ~PAGE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto);
		if (*pgto != *pgfrom) {
			vfrom = kmap_atomic(*pgfrom);
			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
			kunmap_atomic(vfrom);
		} else
			memmove(vto + pgto_base, vto + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vto);

	} while ((len -= copy) != 0);
}

/**
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	for (;;) {
		copy = PAGE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	flush_dcache_page(*pgto);
}

/**
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	do {
		copy = PAGE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom);

		pgbase += copy;
		if (pgbase == PAGE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);

/**
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;

	WARN_ON_ONCE(len > head->iov_len);
	if (len > head->iov_len)
		len = head->iov_len;

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					 buf->pages,
					 buf->page_base + pglen + offs - len,
					 copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
			       (char *)head->iov_base +
			       head->iov_len - offs,
			       copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
						buf->page_base + len,
						buf->page_base,
						pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
			       (char *)head->iov_base + head->iov_len - len,
			       copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

/**
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	unsigned int pglen = buf->page_len;
	unsigned int tailbuf_len;

	tail = buf->tail;
	BUG_ON(len > pglen);

	tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;

	/* Shift the tail first */
	if (tailbuf_len != 0) {
		unsigned int free_space = tailbuf_len - tail->iov_len;

		if (len < free_space)
			free_space = len;
		tail->iov_len += free_space;

		copy = len;
		if (tail->iov_len > len) {
			char *p = (char *)tail->iov_base + len;
			memmove(p, tail->iov_base, tail->iov_len - len);
		} else
			copy = tail->iov_len;
		/* Copy from the inlined pages into the tail */
		_copy_from_pages((char *)tail->iov_base,
				 buf->pages, buf->page_base + pglen - len,
				 copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);

/**
 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
{
	return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
}
EXPORT_SYMBOL_GPL(xdr_stream_pos);

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	xdr_set_scratch_buffer(xdr, NULL, 0);
	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_encode);

/**
 * xdr_commit_encode - Ensure all data is written to buffer
 * @xdr: pointer to xdr_stream
 *
 * We handle encoding across page boundaries by giving the caller a
 * temporary location to write to, then later copying the data into
 * place; xdr_commit_encode does that copying.
 *
 * Normally the caller doesn't need to call this directly, as the
 * following xdr_reserve_space will do it.  But an explicit call may be
 * required at the end of encoding, or any other time when the xdr_buf
 * data might be read.
 */
void xdr_commit_encode(struct xdr_stream *xdr)
{
	int shift = xdr->scratch.iov_len;
	void *page;

	if (shift == 0)
		return;
	page = page_address(*xdr->page_ptr);
	memcpy(xdr->scratch.iov_base, page, shift);
	memmove(page, page + shift, (void *)xdr->p - page);
	xdr->scratch.iov_len = 0;
}
EXPORT_SYMBOL_GPL(xdr_commit_encode);

static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
		size_t nbytes)
{
	__be32 *p;
	int space_left;
	int frag1bytes, frag2bytes;

	if (nbytes > PAGE_SIZE)
		return NULL; /* Bigger buffers require special handling */
	if (xdr->buf->len + nbytes > xdr->buf->buflen)
		return NULL; /* Sorry, we're totally out of space */
	frag1bytes = (xdr->end - xdr->p) << 2;
	frag2bytes = nbytes - frag1bytes;
	if (xdr->iov)
		xdr->iov->iov_len += frag1bytes;
	else
		xdr->buf->page_len += frag1bytes;
	xdr->page_ptr++;
	xdr->iov = NULL;
	/*
	 * If the last encode didn't end exactly on a page boundary, the
	 * next one will straddle boundaries.  Encode into the next
	 * page, then copy it back later in xdr_commit_encode.  We use
	 * the "scratch" iov to track any temporarily unused fragment of
	 * space at the end of the previous buffer:
	 */
	xdr->scratch.iov_base = xdr->p;
	xdr->scratch.iov_len = frag1bytes;
	p = page_address(*xdr->page_ptr);
	/*
	 * Note this is where the next encode will start after we've
	 * shifted this one back:
	 */
	xdr->p = (void *)p + frag2bytes;
	space_left = xdr->buf->buflen - xdr->buf->len;
	xdr->end = (void *)p + min_t(int, space_left, PAGE_SIZE);
	xdr->buf->page_len += frag2bytes;
	xdr->buf->len += nbytes;
	return p;
}

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	xdr_commit_encode(xdr);
	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return xdr_get_next_encode_buffer(xdr, nbytes);
	xdr->p = q;
	if (xdr->iov)
		xdr->iov->iov_len += nbytes;
	else
		xdr->buf->page_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
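
/*
 * Usage sketch (not part of the original file; 'req' and 'args' stand in
 * for a caller's transport and argument structures): a typical encoder
 * initializes the stream over the send buffer, then reserves space for
 * each fixed-size chunk before writing it:
 *
 *	struct xdr_stream xdr;
 *	__be32 *p;
 *
 *	xdr_init_encode(&xdr, &req->rq_snd_buf, req->rq_svec->iov_base);
 *	p = xdr_reserve_space(&xdr, 2 * 4);
 *	if (p == NULL)
 *		return -EMSGSIZE;	(error choice is the caller's)
 *	*p++ = cpu_to_be32(args->mode);
 *	*p++ = cpu_to_be32(args->count);
 */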

/**
 * xdr_truncate_encode - truncate an encode buffer
 * @xdr: pointer to xdr_stream
 * @len: new length of buffer
 *
 * Truncates the xdr stream, so that xdr->buf->len == len,
 * and xdr->p points at offset len from the start of the buffer, and
 * head, tail, and page lengths are adjusted to correspond.
 *
 * If this means moving xdr->p to a different buffer, we assume that
 * the end pointer should be set to the end of the current page,
 * except in the case of the head buffer when we assume the head
 * buffer's current length represents the end of the available buffer.
 *
 * This is *not* safe to use on a buffer that already has inlined page
 * cache pages (as in a zero-copy server read reply), except for the
 * simple case of truncating from one position in the tail to another.
 *
 */
void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *head = buf->head;
	struct kvec *tail = buf->tail;
	int fraglen;
	int new;

	if (len > buf->len) {
		WARN_ON_ONCE(1);
		return;
	}
	xdr_commit_encode(xdr);

	fraglen = min_t(int, buf->len - len, tail->iov_len);
	tail->iov_len -= fraglen;
	buf->len -= fraglen;
	if (tail->iov_len) {
		xdr->p = tail->iov_base + tail->iov_len;
		WARN_ON_ONCE(!xdr->end);
		WARN_ON_ONCE(!xdr->iov);
		return;
	}
	WARN_ON_ONCE(fraglen);
	fraglen = min_t(int, buf->len - len, buf->page_len);
	buf->page_len -= fraglen;
	buf->len -= fraglen;

	new = buf->page_base + buf->page_len;

	xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);

	if (buf->page_len) {
		xdr->p = page_address(*xdr->page_ptr);
		xdr->end = (void *)xdr->p + PAGE_SIZE;
		xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
		WARN_ON_ONCE(xdr->iov);
		return;
	}
	if (fraglen) {
		xdr->end = head->iov_base + head->iov_len;
		xdr->page_ptr--;
	}
	/* (otherwise assume xdr->end is already set) */
	head->iov_len = len;
	buf->len = len;
	xdr->p = head->iov_base + head->iov_len;
	xdr->iov = buf->head;
}
EXPORT_SYMBOL(xdr_truncate_encode);

/**
 * xdr_restrict_buflen - decrease available buffer space
 * @xdr: pointer to xdr_stream
 * @newbuflen: new maximum number of bytes available
 *
 * Adjust our idea of how much space is available in the buffer.
 * If we've already used too much space in the buffer, returns -1.
 * If the available space is already smaller than newbuflen, returns 0
 * and does nothing.  Otherwise, adjusts xdr->buf->buflen to newbuflen
 * and ensures xdr->end is set at most offset newbuflen from the start
 * of the buffer.
 */
int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen)
{
	struct xdr_buf *buf = xdr->buf;
	int left_in_this_buf = (void *)xdr->end - (void *)xdr->p;
	int end_offset = buf->len + left_in_this_buf;

	if (newbuflen < 0 || newbuflen < buf->len)
		return -1;
	if (newbuflen > buf->buflen)
		return 0;
	if (newbuflen < end_offset)
		xdr->end = (void *)xdr->end + newbuflen - end_offset;
	buf->buflen = newbuflen;
	return 0;
}
EXPORT_SYMBOL(xdr_restrict_buflen);

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);
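
/*
 * Usage sketch (hypothetical WRITE-style call): after encoding the fixed
 * header, the payload pages are spliced in behind it:
 *
 *	xdr_write_pages(&xdr, args->pages, args->pgbase, args->count);
 *
 * The tail kvec is pointed just past the pages so that the zero pad
 * needed to keep the stream 32-bit aligned is transmitted after them.
 */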

static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
		unsigned int len)
{
	if (len > iov->iov_len)
		len = iov->iov_len;
	xdr->p = (__be32*)iov->iov_base;
	xdr->end = (__be32*)(iov->iov_base + len);
	xdr->iov = iov;
	xdr->page_ptr = NULL;
}

static int xdr_set_page_base(struct xdr_stream *xdr,
		unsigned int base, unsigned int len)
{
	unsigned int pgnr;
	unsigned int maxlen;
	unsigned int pgoff;
	unsigned int pgend;
	void *kaddr;

	maxlen = xdr->buf->page_len;
	if (base >= maxlen)
		return -EINVAL;
	maxlen -= base;
	if (len > maxlen)
		len = maxlen;

	base += xdr->buf->page_base;

	pgnr = base >> PAGE_SHIFT;
	xdr->page_ptr = &xdr->buf->pages[pgnr];
	kaddr = page_address(*xdr->page_ptr);

	pgoff = base & ~PAGE_MASK;
	xdr->p = (__be32*)(kaddr + pgoff);

	pgend = pgoff + len;
	if (pgend > PAGE_SIZE)
		pgend = PAGE_SIZE;
	xdr->end = (__be32*)(kaddr + pgend);
	xdr->iov = NULL;
	return 0;
}

static void xdr_set_next_page(struct xdr_stream *xdr)
{
	unsigned int newbase;

	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
	newbase -= xdr->buf->page_base;

	if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
		xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
}

static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
	if (xdr->page_ptr != NULL)
		xdr_set_next_page(xdr);
	else if (xdr->iov == xdr->buf->head) {
		if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
			xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
	}
	return xdr->p != xdr->end;
}

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	xdr->buf = buf;
	xdr->scratch.iov_base = NULL;
	xdr->scratch.iov_len = 0;
	xdr->nwords = XDR_QUADLEN(buf->len);
	if (buf->head[0].iov_len != 0)
		xdr_set_iov(xdr, buf->head, buf->len);
	else if (buf->page_len != 0)
		xdr_set_page_base(xdr, 0, buf->len);
	if (p != NULL && p > xdr->p && xdr->end >= p) {
		xdr->nwords -= p - xdr->p;
		xdr->p = p;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_decode);

/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @pages: list of pages to decode into
 * @len: length in bytes of buffer in pages
 */
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
			   struct page **pages, unsigned int len)
{
	memset(buf, 0, sizeof(*buf));
	buf->pages = pages;
	buf->page_len = len;
	buf->buflen = len;
	buf->len = len;
	xdr_init_decode(xdr, buf, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);

static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	unsigned int nwords = XDR_QUADLEN(nbytes);
	__be32 *p = xdr->p;
	__be32 *q = p + nwords;

	if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->nwords -= nwords;
	return p;
}

/**
 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to an empty buffer
 * @buflen: size of 'buf'
 *
 * The scratch buffer is used when decoding from an array of pages.
 * If an xdr_inline_decode() call spans across page boundaries, then
 * we copy the data into the scratch buffer in order to allow linear
 * access.
 */
void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
{
	xdr->scratch.iov_base = buf;
	xdr->scratch.iov_len = buflen;
}
EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);

static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;
	void *cpdest = xdr->scratch.iov_base;
	size_t cplen = (char *)xdr->end - (char *)xdr->p;

	if (nbytes > xdr->scratch.iov_len)
		return NULL;
	memcpy(cpdest, xdr->p, cplen);
	cpdest += cplen;
	nbytes -= cplen;
	if (!xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, nbytes);
	return xdr->scratch.iov_base;
}

/**
 * xdr_inline_decode - Retrieve XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;

	if (nbytes == 0)
		return xdr->p;
	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p != NULL)
		return p;
	return xdr_copy_to_scratch(xdr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
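
/*
 * Usage sketch (illustrative; 'rcvbuf' and 'scratch_page' are assumed to
 * be set up by the caller): the scratch buffer lets xdr_inline_decode()
 * linearize an item that straddles a page boundary:
 *
 *	struct xdr_stream xdr;
 *	__be32 *p;
 *	u32 count;
 *
 *	xdr_init_decode(&xdr, rcvbuf, rcvbuf->head[0].iov_base);
 *	xdr_set_scratch_buffer(&xdr, page_address(scratch_page), PAGE_SIZE);
 *	p = xdr_inline_decode(&xdr, 4);
 *	if (unlikely(p == NULL))
 *		return -EIO;
 *	count = be32_to_cpup(p);
 */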

static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	unsigned int nwords = XDR_QUADLEN(len);
	unsigned int cur = xdr_stream_pos(xdr);

	if (xdr->nwords == 0)
		return 0;
	/* Realign pages to current pointer position */
	iov = buf->head;
	if (iov->iov_len > cur) {
		xdr_shrink_bufhead(buf, iov->iov_len - cur);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}

	if (nwords > xdr->nwords) {
		nwords = xdr->nwords;
		len = nwords << 2;
	}
	if (buf->page_len <= len)
		len = buf->page_len;
	else if (nwords < xdr->nwords) {
		/* Truncate page data and move it into the tail */
		xdr_shrink_pagelen(buf, buf->page_len - len);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}
	return len;
}

/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 *
 * Returns the number of XDR encoded bytes now contained in the pages
 */
unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	unsigned int nwords;
	unsigned int end;
	unsigned int padding;

	len = xdr_align_pages(xdr, len);
	if (len == 0)
		return 0;
	nwords = XDR_QUADLEN(len);
	padding = (nwords << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length.  */
	end = ((xdr->nwords - nwords) << 2) + padding;
	if (end > iov->iov_len)
		end = iov->iov_len;

	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
	xdr->page_ptr = NULL;
	xdr->nwords = XDR_QUADLEN(end - padding);
	return len;
}
EXPORT_SYMBOL_GPL(xdr_read_pages);

/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	len = xdr_align_pages(xdr, len);
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	if (len != 0)
		xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);

/**
 * xdr_buf_subsegment - set subbuf to a portion of buf
 * @buf: an xdr buffer
 * @subbuf: the result buffer
 * @base: beginning of range in bytes
 * @len: length of range in bytes
 *
 * sets @subbuf to an xdr buffer representing the portion of @buf of
 * length @len starting at offset @base.
 *
 * @buf and @subbuf may be pointers to the same struct xdr_buf.
 *
 * Returns -1 if base or length are out of bounds.
 */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		base -= buf->head[0].iov_len;
		subbuf->head[0].iov_len = 0;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		base -= buf->tail[0].iov_len;
		subbuf->tail[0].iov_len = 0;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);

/**
 * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
 * @buf: buf to be trimmed
 * @len: number of bytes to reduce "buf" by
 *
 * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
 * that it's possible that we'll trim less than that amount if the xdr_buf is
 * too small, or if (for instance) it's all in the head and the parser has
 * already read too far into it.
 */
void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
{
	size_t cur;
	unsigned int trim = len;

	if (buf->tail[0].iov_len) {
		cur = min_t(size_t, buf->tail[0].iov_len, trim);
		buf->tail[0].iov_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->page_len) {
		cur = min_t(unsigned int, buf->page_len, trim);
		buf->page_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->head[0].iov_len) {
		cur = min_t(size_t, buf->head[0].iov_len, trim);
		buf->head[0].iov_len -= cur;
		trim -= cur;
	}
fix_len:
	buf->len -= (len - trim);
}
EXPORT_SYMBOL_GPL(xdr_buf_trim);

static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);

static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);

int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32	raw;
	int	status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = be32_to_cpu(raw);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32	raw = cpu_to_be32(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);
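
/*
 * Usage sketch: xdr_encode_word()/xdr_decode_word() give random access to
 * a 32-bit word at an arbitrary byte offset in an xdr_buf, wherever that
 * offset falls (head, pages or tail):
 *
 *	u32 val;
 *
 *	if (xdr_encode_word(buf, offset, 42))
 *		return -EINVAL;
 *	if (xdr_decode_word(buf, offset, &val))
 *		return -EINVAL;
 *	(val is now 42)
 */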

/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * tail.) */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_SHIFT);
		base &= ~PAGE_MASK;
		avail_page = min_t(unsigned int, PAGE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					    desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
				 (unsigned int) PAGE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);
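
/*
 * Usage sketch (hypothetical element codec): callers describe the array
 * via struct xdr_array2_desc and supply an xcode callback that encodes
 * or decodes exactly one desc->elem_size element at 'elem':
 *
 *	static int foo_xcode(struct xdr_array2_desc *desc, void *elem)
 *	{
 *		(encode or decode desc->elem_size bytes at elem)
 *		return 0;
 *	}
 *
 *	struct xdr_array2_desc desc = {
 *		.elem_size    = 4,
 *		.array_len    = n,
 *		.array_maxlen = n,
 *		.xcode        = foo_xcode,
 *	};
 *	err = xdr_encode_array2(buf, base, &desc);
 */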

int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist	sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_SHIFT;
		thislen = PAGE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
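
/*
 * Usage sketch (hypothetical 'sum_actor'): the actor callback sees each
 * fragment of the requested range as a single scatterlist entry, which
 * makes this routine a natural fit for incremental checksumming:
 *
 *	static int sum_actor(struct scatterlist *sg, void *data)
 *	{
 *		(fold sg->length bytes into the running digest pointed
 *		 to by data, returning 0 on success)
 *		return 0;
 *	}
 *
 *	err = xdr_process_buf(buf, 0, buf->len, sum_actor, &digest);
 */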

// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/bvec.h>
#include <trace/events/sunrpc.h>

static void _copy_to_pages(struct page **, size_t, const char *, size_t);

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = cpu_to_be32(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 *
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = cpu_to_be32(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);

__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);

__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len;

	len = be32_to_cpu(*p++);
	if (len > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);

/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 *
 */
void xdr_terminate_string(const struct xdr_buf *buf, const u32 len)
{
	char *kaddr;

	kaddr = kmap_atomic(buf->pages[0]);
	kaddr[buf->page_base + len] = '\0';
	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);

size_t xdr_buf_pagecount(const struct xdr_buf *buf)
{
	if (!buf->page_len)
		return 0;
	return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int
xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp)
{
	size_t i, n = xdr_buf_pagecount(buf);

	if (n != 0 && buf->bvec == NULL) {
		buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
		if (!buf->bvec)
			return -ENOMEM;
		for (i = 0; i < n; i++) {
			buf->bvec[i].bv_page = buf->pages[i];
			buf->bvec[i].bv_len = PAGE_SIZE;
			buf->bvec[i].bv_offset = 0;
		}
	}
	return 0;
}

void
xdr_free_bvec(struct xdr_buf *buf)
{
	kfree(buf->bvec);
	buf->bvec = NULL;
}
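
/*
 * Usage sketch: a transport that sends the page section with an
 * iov_iter-based API attaches a bio_vec array first and frees it when
 * the buffer is retired:
 *
 *	if (xdr_alloc_bvec(buf, GFP_KERNEL))
 *		return -ENOMEM;
 *	...
 *	xdr_free_bvec(buf);
 */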

/**
 * xdr_inline_pages - Prepare receive buffer for a large reply
 * @xdr: xdr_buf into which reply will be placed
 * @offset: expected offset where data payload will start, in bytes
 * @pages: vector of struct page pointers
 * @base: offset in first page where receive should start, in bytes
 * @len: expected size of the upper layer data payload, in bytes
 *
 */
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;
	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);

/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 */

/**
 * _shift_data_left_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_SHIFT) + base
 * Also note: pgto_base must be < pgfrom_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_left_pages(struct page **pages, size_t pgto_base,
			size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgfrom_base <= pgto_base);

	if (!len)
		return;

	pgto = pages + (pgto_base >> PAGE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);

	pgto_base &= ~PAGE_MASK;
	pgfrom_base &= ~PAGE_MASK;

	do {
		if (pgto_base >= PAGE_SIZE) {
			pgto_base = 0;
			pgto++;
		}
		if (pgfrom_base >= PAGE_SIZE) {
			pgfrom_base = 0;
			pgfrom++;
		}

		copy = len;
		if (copy > (PAGE_SIZE - pgto_base))
			copy = PAGE_SIZE - pgto_base;
		if (copy > (PAGE_SIZE - pgfrom_base))
			copy = PAGE_SIZE - pgfrom_base;

		vto = kmap_atomic(*pgto);
		if (*pgto != *pgfrom) {
			vfrom = kmap_atomic(*pgfrom);
			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
			kunmap_atomic(vfrom);
		} else
			memmove(vto + pgto_base, vto + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vto);

		pgto_base += copy;
		pgfrom_base += copy;

	} while ((len -= copy) != 0);
}

/**
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
			size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	if (!len)
		return;

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);

	pgto_base &= ~PAGE_MASK;
	pgfrom_base &= ~PAGE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto);
		if (*pgto != *pgfrom) {
			vfrom = kmap_atomic(*pgfrom);
			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
			kunmap_atomic(vfrom);
		} else
			memmove(vto + pgto_base, vto + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vto);

	} while ((len -= copy) != 0);
}

/**
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	if (!len)
		return;

	pgto = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	for (;;) {
		copy = PAGE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	flush_dcache_page(*pgto);
}

/**
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	if (!len)
		return;

	pgfrom = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	do {
		copy = PAGE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom);

		pgbase += copy;
		if (pgbase == PAGE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);
static void xdr_buf_iov_zero(const struct kvec *iov, unsigned int base,
			     unsigned int len)
{
	if (base >= iov->iov_len)
		return;
	if (len > iov->iov_len - base)
		len = iov->iov_len - base;
	memset(iov->iov_base + base, 0, len);
}

/**
 * xdr_buf_pages_zero
 * @buf: xdr_buf
 * @pgbase: beginning offset
 * @len: length
 */
static void xdr_buf_pages_zero(const struct xdr_buf *buf, unsigned int pgbase,
			       unsigned int len)
{
	struct page **pages = buf->pages;
	struct page **page;
	char *vpage;
	unsigned int zero;

	if (!len)
		return;
	if (pgbase >= buf->page_len) {
		xdr_buf_iov_zero(buf->tail, pgbase - buf->page_len, len);
		return;
	}
	if (pgbase + len > buf->page_len) {
		xdr_buf_iov_zero(buf->tail, 0, pgbase + len - buf->page_len);
		len = buf->page_len - pgbase;
	}

	pgbase += buf->page_base;

	page = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	do {
		zero = PAGE_SIZE - pgbase;
		if (zero > len)
			zero = len;

		vpage = kmap_atomic(*page);
		memset(vpage + pgbase, 0, zero);
		kunmap_atomic(vpage);

		flush_dcache_page(*page);
		pgbase = 0;
		page++;

	} while ((len -= zero) != 0);
}

static unsigned int xdr_buf_pages_fill_sparse(const struct xdr_buf *buf,
					      unsigned int buflen, gfp_t gfp)
{
	unsigned int i, npages, pagelen;

	if (!(buf->flags & XDRBUF_SPARSE_PAGES))
		return buflen;
	if (buflen <= buf->head->iov_len)
		return buflen;
	pagelen = buflen - buf->head->iov_len;
	if (pagelen > buf->page_len)
		pagelen = buf->page_len;
	npages = (pagelen + buf->page_base + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		if (buf->pages[i])
			continue;
		buf->pages[i] = alloc_page(gfp);
		if (likely(buf->pages[i]))
			continue;
		buflen -= pagelen;
		pagelen = i << PAGE_SHIFT;
		if (pagelen > buf->page_base)
			buflen += pagelen - buf->page_base;
		break;
	}
	return buflen;
}

static void xdr_buf_try_expand(struct xdr_buf *buf, unsigned int len)
{
	struct kvec *head = buf->head;
	struct kvec *tail = buf->tail;
	unsigned int sum = head->iov_len + buf->page_len + tail->iov_len;
	unsigned int free_space, newlen;

	if (sum > buf->len) {
		free_space = min_t(unsigned int, sum - buf->len, len);
		newlen = xdr_buf_pages_fill_sparse(buf, buf->len + free_space,
						   GFP_KERNEL);
		free_space = newlen - buf->len;
		buf->len = newlen;
		len -= free_space;
		if (!len)
			return;
	}

	if (buf->buflen > sum) {
		/* Expand the tail buffer */
		free_space = min_t(unsigned int, buf->buflen - sum, len);
		tail->iov_len += free_space;
		buf->len += free_space;
	}
}

static void xdr_buf_tail_copy_right(const struct xdr_buf *buf,
				    unsigned int base, unsigned int len,
				    unsigned int shift)
{
	const struct kvec *tail = buf->tail;
	unsigned int to = base + shift;

	if (to >= tail->iov_len)
		return;
	if (len + to > tail->iov_len)
		len = tail->iov_len - to;
	memmove(tail->iov_base + to, tail->iov_base + base, len);
}

static void xdr_buf_pages_copy_right(const struct xdr_buf *buf,
				     unsigned int base, unsigned int len,
				     unsigned int shift)
{
	const struct kvec *tail = buf->tail;
	unsigned int to = base + shift;
	unsigned int pglen = 0;
	unsigned int talen = 0, tato = 0;

	if (base >= buf->page_len)
		return;
	if (len > buf->page_len - base)
		len = buf->page_len - base;
	if (to >= buf->page_len) {
		tato = to - buf->page_len;
		if (tail->iov_len >= len + tato)
			talen = len;
		else if (tail->iov_len > tato)
			talen = tail->iov_len - tato;
	} else if (len + to >= buf->page_len) {
		pglen = buf->page_len - to;
		talen = len - pglen;
		if (talen > tail->iov_len)
			talen = tail->iov_len;
	} else
		pglen = len;

	_copy_from_pages(tail->iov_base + tato, buf->pages,
			 buf->page_base + base + pglen, talen);
	_shift_data_right_pages(buf->pages, buf->page_base + to,
				buf->page_base + base, pglen);
}

static void xdr_buf_head_copy_right(const struct xdr_buf *buf,
				    unsigned int base, unsigned int len,
				    unsigned int shift)
{
	const struct kvec *head = buf->head;
	const struct kvec *tail = buf->tail;
	unsigned int to = base + shift;
	unsigned int pglen = 0, pgto = 0;
	unsigned int talen = 0, tato = 0;

	if (base >= head->iov_len)
		return;
	if (len > head->iov_len - base)
		len = head->iov_len - base;
	if (to >= buf->page_len + head->iov_len) {
		tato = to - buf->page_len - head->iov_len;
		talen = len;
	} else if (to >= head->iov_len) {
		pgto = to - head->iov_len;
		pglen = len;
		if (pgto + pglen > buf->page_len) {
			talen = pgto + pglen - buf->page_len;
			pglen -= talen;
		}
	} else {
		pglen = len - to;
		if (pglen > buf->page_len) {
			talen = pglen - buf->page_len;
			pglen = buf->page_len;
		}
	}

	len -= talen;
	base += len;
	if (talen + tato > tail->iov_len)
		talen = tail->iov_len > tato ? tail->iov_len - tato : 0;
	memcpy(tail->iov_base + tato, head->iov_base + base, talen);

	len -= pglen;
	base -= pglen;
	_copy_to_pages(buf->pages, buf->page_base + pgto, head->iov_base + base,
		       pglen);

	base -= len;
	memmove(head->iov_base + to, head->iov_base + base, len);
}

static void xdr_buf_tail_shift_right(const struct xdr_buf *buf,
				     unsigned int base, unsigned int len,
				     unsigned int shift)
{
	const struct kvec *tail = buf->tail;

	if (base >= tail->iov_len || !shift || !len)
		return;
	xdr_buf_tail_copy_right(buf, base, len, shift);
}

static void xdr_buf_pages_shift_right(const struct xdr_buf *buf,
				      unsigned int base, unsigned int len,
				      unsigned int shift)
{
	if (!shift || !len)
		return;
	if (base >= buf->page_len) {
		xdr_buf_tail_shift_right(buf, base - buf->page_len, len, shift);
		return;
	}
	if (base + len > buf->page_len)
		xdr_buf_tail_shift_right(buf, 0, base + len - buf->page_len,
					 shift);
	xdr_buf_pages_copy_right(buf, base, len, shift);
}
655
656static void xdr_buf_head_shift_right(const struct xdr_buf *buf,
657 unsigned int base, unsigned int len,
658 unsigned int shift)
659{
660 const struct kvec *head = buf->head;
661
662 if (!shift)
663 return;
664 if (base >= head->iov_len) {
		xdr_buf_pages_shift_right(buf, base - head->iov_len, len,
					  shift);
		return;
	}
	if (base + len > head->iov_len)
		xdr_buf_pages_shift_right(buf, 0, base + len - head->iov_len,
					  shift);
	xdr_buf_head_copy_right(buf, base, len, shift);
}

static void xdr_buf_tail_copy_left(const struct xdr_buf *buf, unsigned int base,
				   unsigned int len, unsigned int shift)
{
	const struct kvec *tail = buf->tail;

	if (base >= tail->iov_len)
		return;
	if (len > tail->iov_len - base)
		len = tail->iov_len - base;
	/* Shift data into head */
	if (shift > buf->page_len + base) {
		const struct kvec *head = buf->head;
		unsigned int hdto =
			head->iov_len + buf->page_len + base - shift;
		unsigned int hdlen = len;

		if (WARN_ONCE(shift > head->iov_len + buf->page_len + base,
			      "SUNRPC: Misaligned data.\n"))
			return;
		if (hdto + hdlen > head->iov_len)
			hdlen = head->iov_len - hdto;
		memcpy(head->iov_base + hdto, tail->iov_base + base, hdlen);
		base += hdlen;
		len -= hdlen;
		if (!len)
			return;
	}
	/* Shift data into pages */
	if (shift > base) {
		unsigned int pgto = buf->page_len + base - shift;
		unsigned int pglen = len;

		if (pgto + pglen > buf->page_len)
			pglen = buf->page_len - pgto;
		_copy_to_pages(buf->pages, buf->page_base + pgto,
			       tail->iov_base + base, pglen);
		base += pglen;
		len -= pglen;
		if (!len)
			return;
	}
	memmove(tail->iov_base + base - shift, tail->iov_base + base, len);
}

static void xdr_buf_pages_copy_left(const struct xdr_buf *buf,
				    unsigned int base, unsigned int len,
				    unsigned int shift)
{
	unsigned int pgto;

	if (base >= buf->page_len)
		return;
	if (len > buf->page_len - base)
		len = buf->page_len - base;
	/* Shift data into head */
	if (shift > base) {
		const struct kvec *head = buf->head;
		unsigned int hdto = head->iov_len + base - shift;
		unsigned int hdlen = len;

		if (WARN_ONCE(shift > head->iov_len + base,
			      "SUNRPC: Misaligned data.\n"))
			return;
		if (hdto + hdlen > head->iov_len)
			hdlen = head->iov_len - hdto;
		_copy_from_pages(head->iov_base + hdto, buf->pages,
				 buf->page_base + base, hdlen);
		base += hdlen;
		len -= hdlen;
		if (!len)
			return;
	}
	pgto = base - shift;
	_shift_data_left_pages(buf->pages, buf->page_base + pgto,
			       buf->page_base + base, len);
}

static void xdr_buf_tail_shift_left(const struct xdr_buf *buf,
				    unsigned int base, unsigned int len,
				    unsigned int shift)
{
	if (!shift || !len)
		return;
	xdr_buf_tail_copy_left(buf, base, len, shift);
}

static void xdr_buf_pages_shift_left(const struct xdr_buf *buf,
				     unsigned int base, unsigned int len,
				     unsigned int shift)
{
	if (!shift || !len)
		return;
	if (base >= buf->page_len) {
		xdr_buf_tail_shift_left(buf, base - buf->page_len, len, shift);
		return;
	}
	xdr_buf_pages_copy_left(buf, base, len, shift);
	len += base;
	if (len <= buf->page_len)
		return;
	xdr_buf_tail_copy_left(buf, 0, len - buf->page_len, shift);
}

static void xdr_buf_head_shift_left(const struct xdr_buf *buf,
				    unsigned int base, unsigned int len,
				    unsigned int shift)
{
	const struct kvec *head = buf->head;
	unsigned int bytes;

	if (!shift || !len)
		return;

	if (shift > base) {
		bytes = (shift - base);
		if (bytes >= len)
			return;
		base += bytes;
		len -= bytes;
	}

	if (base < head->iov_len) {
		bytes = min_t(unsigned int, len, head->iov_len - base);
		memmove(head->iov_base + (base - shift),
			head->iov_base + base, bytes);
		base += bytes;
		len -= bytes;
	}
	xdr_buf_pages_shift_left(buf, base - head->iov_len, len, shift);
}

/**
 * xdr_shrink_bufhead - shrinks buf->head[0] to @len bytes
 * @buf: xdr_buf
 * @len: new length of buf->head[0]
 *
 * Shrinks the XDR buffer's header kvec buf->head[0], setting it to
 * @len bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 * Returns the actual number of bytes moved.
 */
static unsigned int xdr_shrink_bufhead(struct xdr_buf *buf, unsigned int len)
{
	struct kvec *head = buf->head;
	unsigned int shift, buflen = max(buf->len, len);

	WARN_ON_ONCE(len > head->iov_len);
	if (head->iov_len > buflen) {
		buf->buflen -= head->iov_len - buflen;
		head->iov_len = buflen;
	}
	if (len >= head->iov_len)
		return 0;
	shift = head->iov_len - len;
	xdr_buf_try_expand(buf, shift);
	xdr_buf_head_shift_right(buf, len, buflen - len, shift);
	head->iov_len = len;
	buf->buflen -= shift;
	buf->len -= shift;
	return shift;
}

/**
 * xdr_shrink_pagelen - shrinks buf->pages to @len bytes
 * @buf: xdr_buf
 * @len: new page buffer length
 *
 * The extra data is not lost, but is instead moved into buf->tail.
 * Returns the actual number of bytes moved.
 */
static unsigned int xdr_shrink_pagelen(struct xdr_buf *buf, unsigned int len)
{
	unsigned int shift, buflen = buf->len - buf->head->iov_len;

	WARN_ON_ONCE(len > buf->page_len);
	if (buf->head->iov_len >= buf->len || len > buflen)
		buflen = len;
	if (buf->page_len > buflen) {
		buf->buflen -= buf->page_len - buflen;
		buf->page_len = buflen;
	}
	if (len >= buf->page_len)
		return 0;
	shift = buf->page_len - len;
	xdr_buf_try_expand(buf, shift);
	xdr_buf_pages_shift_right(buf, len, buflen - len, shift);
	buf->page_len = len;
	buf->len -= shift;
	buf->buflen -= shift;
	return shift;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, buf->head->iov_len - len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);

/**
 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
{
	return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
}
EXPORT_SYMBOL_GPL(xdr_stream_pos);

static void xdr_stream_set_pos(struct xdr_stream *xdr, unsigned int pos)
{
	unsigned int blen = xdr->buf->len;

	xdr->nwords = blen > pos ? XDR_QUADLEN(blen) - XDR_QUADLEN(pos) : 0;
}

static void xdr_stream_page_set_pos(struct xdr_stream *xdr, unsigned int pos)
{
	xdr_stream_set_pos(xdr, pos + xdr->buf->head[0].iov_len);
}

/**
 * xdr_page_pos - Return the current offset from the start of the xdr pages
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_page_pos(const struct xdr_stream *xdr)
{
	unsigned int pos = xdr_stream_pos(xdr);

	WARN_ON(pos < xdr->buf->head[0].iov_len);
	return pos - xdr->buf->head[0].iov_len;
}
EXPORT_SYMBOL_GPL(xdr_page_pos);

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 * @rqst: pointer to controlling rpc_rqst, for debugging
 *
 * Note: at the moment the RPC client only passes the length of our
 * scratch buffer in the xdr_buf's header kvec. Previously this
 * meant we needed to call xdr_adjust_iovec() after encoding the
 * data. With the new scheme, the xdr_stream manages the details
 * of the buffer length, and takes care of adjusting the kvec
 * length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
		     struct rpc_rqst *rqst)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	xdr_reset_scratch_buffer(xdr);
	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
	xdr->rqst = rqst;
}
EXPORT_SYMBOL_GPL(xdr_init_encode);
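
/*
 * Editor's illustrative sketch, not part of the original file: a minimal
 * xdr_init_encode() caller that lays an xdr_buf over a flat buffer and
 * encodes one 32-bit word. The function name and the flat-buffer setup
 * are hypothetical; real callers typically encode into an rpc_rqst's
 * rq_snd_buf instead.
 */
static int __maybe_unused example_encode_u32(void *mem, size_t memlen, u32 val)
{
	struct kvec iov = { .iov_base = mem, .iov_len = 0 };
	struct xdr_buf buf;
	struct xdr_stream xdr;
	__be32 *p;

	xdr_buf_from_iov(&iov, &buf);	/* head-only xdr_buf, len == 0 */
	buf.buflen = memlen;		/* space available in @mem */
	xdr_init_encode(&xdr, &buf, NULL, NULL);
	p = xdr_reserve_space(&xdr, sizeof(__be32));
	if (!p)
		return -EMSGSIZE;
	*p = cpu_to_be32(val);
	return 0;
}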

/**
 * xdr_init_encode_pages - Initialize an xdr_stream for encoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer into which to encode data
 * @pages: list of pages to encode into
 * @rqst: pointer to controlling rpc_rqst, for debugging
 *
 */
void xdr_init_encode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
			   struct page **pages, struct rpc_rqst *rqst)
{
	xdr_reset_scratch_buffer(xdr);

	xdr->buf = buf;
	xdr->page_ptr = pages;
	xdr->iov = NULL;
	xdr->p = page_address(*pages);
	xdr->end = (void *)xdr->p + min_t(u32, buf->buflen, PAGE_SIZE);
	xdr->rqst = rqst;
}
EXPORT_SYMBOL_GPL(xdr_init_encode_pages);

/**
 * __xdr_commit_encode - Ensure all data is written to buffer
 * @xdr: pointer to xdr_stream
 *
 * We handle encoding across page boundaries by giving the caller a
 * temporary location to write to, then later copying the data into
 * place; xdr_commit_encode does that copying.
 *
 * Normally the caller doesn't need to call this directly, as the
 * following xdr_reserve_space will do it. But an explicit call may be
 * required at the end of encoding, or any other time when the xdr_buf
 * data might be read.
 */
void __xdr_commit_encode(struct xdr_stream *xdr)
{
	size_t shift = xdr->scratch.iov_len;
	void *page;

	page = page_address(*xdr->page_ptr);
	memcpy(xdr->scratch.iov_base, page, shift);
	memmove(page, page + shift, (void *)xdr->p - page);
	xdr_reset_scratch_buffer(xdr);
}
EXPORT_SYMBOL_GPL(__xdr_commit_encode);

/*
 * The buffer space to be reserved crosses the boundary between
 * xdr->buf->head and xdr->buf->pages, or between two pages
 * in xdr->buf->pages.
 */
static noinline __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
						   size_t nbytes)
{
	int space_left;
	int frag1bytes, frag2bytes;
	void *p;

	if (nbytes > PAGE_SIZE)
		goto out_overflow; /* Bigger buffers require special handling */
	if (xdr->buf->len + nbytes > xdr->buf->buflen)
		goto out_overflow; /* Sorry, we're totally out of space */
	frag1bytes = (xdr->end - xdr->p) << 2;
	frag2bytes = nbytes - frag1bytes;
	if (xdr->iov)
		xdr->iov->iov_len += frag1bytes;
	else
		xdr->buf->page_len += frag1bytes;
	xdr->page_ptr++;
	xdr->iov = NULL;

	/*
	 * If the last encode didn't end exactly on a page boundary, the
	 * next one will straddle boundaries. Encode into the next
	 * page, then copy it back later in xdr_commit_encode. We use
	 * the "scratch" iov to track any temporarily unused fragment of
	 * space at the end of the previous buffer:
	 */
	xdr_set_scratch_buffer(xdr, xdr->p, frag1bytes);

	/*
	 * xdr->p is where the next encode will start after
	 * xdr_commit_encode() has shifted this one back:
	 */
	p = page_address(*xdr->page_ptr);
	xdr->p = p + frag2bytes;
	space_left = xdr->buf->buflen - xdr->buf->len;
	if (space_left - frag1bytes >= PAGE_SIZE)
		xdr->end = p + PAGE_SIZE;
	else
		xdr->end = p + space_left - frag1bytes;

	xdr->buf->page_len += frag2bytes;
	xdr->buf->len += nbytes;
	return p;
out_overflow:
	trace_rpc_xdr_overflow(xdr, nbytes);
	return NULL;
}

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	xdr_commit_encode(xdr);
	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return xdr_get_next_encode_buffer(xdr, nbytes);
	xdr->p = q;
	if (xdr->iov)
		xdr->iov->iov_len += nbytes;
	else
		xdr->buf->page_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
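
/*
 * Editor's illustrative sketch, not part of the original file: reserving
 * space for a counted opaque in one call. A single reservation covers the
 * 4-byte length plus the data rounded up to a 4-byte boundary, so
 * xdr_encode_opaque() can fill it in without a second bounds check. The
 * function name is hypothetical.
 */
static int __maybe_unused example_encode_blob(struct xdr_stream *xdr,
					      const u8 *data, u32 len)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(__be32) + xdr_align_size(len));
	if (!p)
		return -EMSGSIZE;
	xdr_encode_opaque(p, data, len);	/* length + bytes + padding */
	return 0;
}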

/**
 * xdr_reserve_space_vec - Reserves a large amount of buffer space for sending
 * @xdr: pointer to xdr_stream
 * @vec: pointer to a kvec array
 * @nbytes: number of bytes to reserve
 *
 * Reserves enough buffer space to encode 'nbytes' of data and stores the
 * pointers in 'vec'. The size argument passed to xdr_reserve_space() is
 * determined based on the number of bytes remaining in the current page to
 * avoid invalidating iov_base pointers when xdr_commit_encode() is called.
 */
int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec, size_t nbytes)
{
	int thislen;
	int v = 0;
	__be32 *p;

	/*
	 * svcrdma requires every READ payload to start somewhere
	 * in xdr->pages.
	 */
	if (xdr->iov == xdr->buf->head) {
		xdr->iov = NULL;
		xdr->end = xdr->p;
	}

	while (nbytes) {
		thislen = xdr->buf->page_len % PAGE_SIZE;
		thislen = min_t(size_t, nbytes, PAGE_SIZE - thislen);

		p = xdr_reserve_space(xdr, thislen);
		if (!p)
			return -EIO;

		vec[v].iov_base = p;
		vec[v].iov_len = thislen;
		v++;
		nbytes -= thislen;
	}

	return v;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space_vec);

/**
 * xdr_truncate_encode - truncate an encode buffer
 * @xdr: pointer to xdr_stream
 * @len: new length of buffer
 *
 * Truncates the xdr stream, so that xdr->buf->len == len,
 * and xdr->p points at offset len from the start of the buffer, and
 * head, tail, and page lengths are adjusted to correspond.
 *
 * If this means moving xdr->p to a different buffer, we assume that
 * the end pointer should be set to the end of the current page,
 * except in the case of the head buffer when we assume the head
 * buffer's current length represents the end of the available buffer.
 *
 * This is *not* safe to use on a buffer that already has inlined page
 * cache pages (as in a zero-copy server read reply), except for the
 * simple case of truncating from one position in the tail to another.
 *
 */
void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *head = buf->head;
	struct kvec *tail = buf->tail;
	int fraglen;
	int new;

	if (len > buf->len) {
		WARN_ON_ONCE(1);
		return;
	}
	xdr_commit_encode(xdr);

	fraglen = min_t(int, buf->len - len, tail->iov_len);
	tail->iov_len -= fraglen;
	buf->len -= fraglen;
	if (tail->iov_len) {
		xdr->p = tail->iov_base + tail->iov_len;
		WARN_ON_ONCE(!xdr->end);
		WARN_ON_ONCE(!xdr->iov);
		return;
	}
	WARN_ON_ONCE(fraglen);
	fraglen = min_t(int, buf->len - len, buf->page_len);
	buf->page_len -= fraglen;
	buf->len -= fraglen;

	new = buf->page_base + buf->page_len;

	xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);

	if (buf->page_len) {
		xdr->p = page_address(*xdr->page_ptr);
		xdr->end = (void *)xdr->p + PAGE_SIZE;
		xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
		WARN_ON_ONCE(xdr->iov);
		return;
	}
	if (fraglen)
		xdr->end = head->iov_base + head->iov_len;
	/* (otherwise assume xdr->end is already set) */
	xdr->page_ptr--;
	head->iov_len = len;
	buf->len = len;
	xdr->p = head->iov_base + head->iov_len;
	xdr->iov = buf->head;
}
EXPORT_SYMBOL(xdr_truncate_encode);
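
/*
 * Editor's illustrative sketch, not part of the original file: the usual
 * xdr_truncate_encode() pattern is to record buf->len before encoding an
 * optional item, then roll the stream back if a later step fails. The
 * function name and the encoded values are hypothetical.
 */
static int __maybe_unused example_encode_with_rollback(struct xdr_stream *xdr,
						       bool commit)
{
	unsigned int start = xdr->buf->len;	/* rollback point */
	__be32 *p;

	p = xdr_reserve_space(xdr, 2 * sizeof(__be32));
	if (!p)
		return -EMSGSIZE;
	*p++ = cpu_to_be32(1);			/* e.g. "value follows" */
	*p = cpu_to_be32(42);
	if (!commit) {
		/* discard everything encoded since @start */
		xdr_truncate_encode(xdr, start);
		return -EAGAIN;
	}
	return 0;
}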

/**
 * xdr_restrict_buflen - decrease available buffer space
 * @xdr: pointer to xdr_stream
 * @newbuflen: new maximum number of bytes available
 *
 * Adjust our idea of how much space is available in the buffer.
 * If we've already used too much space in the buffer, returns -1.
 * If the available space is already smaller than newbuflen, returns 0
 * and does nothing. Otherwise, adjusts xdr->buf->buflen to newbuflen
 * and ensures xdr->end is set at most offset newbuflen from the start
 * of the buffer.
 */
int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen)
{
	struct xdr_buf *buf = xdr->buf;
	int left_in_this_buf = (void *)xdr->end - (void *)xdr->p;
	int end_offset = buf->len + left_in_this_buf;

	if (newbuflen < 0 || newbuflen < buf->len)
		return -1;
	if (newbuflen > buf->buflen)
		return 0;
	if (newbuflen < end_offset)
		xdr->end = (void *)xdr->end + newbuflen - end_offset;
	buf->buflen = newbuflen;
	return 0;
}
EXPORT_SYMBOL(xdr_restrict_buflen);

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: array of pages to insert
 * @base: starting offset of first data byte in @pages
 * @len: number of data bytes in @pages to insert
 *
 * After the @pages are added, the tail iovec is instantiated pointing to
 * the end of the head buffer, and the stream is set up to encode
 * subsequent items into the tail.
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		     unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *tail = buf->tail;

	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	tail->iov_base = xdr->p;
	tail->iov_len = 0;
	xdr->iov = tail;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		tail->iov_base = (char *)xdr->p + (len & 3);
		tail->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);
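
/*
 * Editor's illustrative sketch, not part of the original file: a typical
 * xdr_write_pages() caller encodes the fixed fields (here just an opaque
 * length) into the head, then splices in the page vector; the helper sets
 * up the tail and any XDR padding. Names are hypothetical.
 */
static int __maybe_unused example_attach_payload(struct xdr_stream *xdr,
						 struct page **pages,
						 unsigned int count)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(__be32));
	if (!p)
		return -EMSGSIZE;
	*p = cpu_to_be32(count);		/* payload length, in head */
	xdr_write_pages(xdr, pages, 0, count);	/* payload, in pages */
	return 0;
}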

static unsigned int xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
				unsigned int base, unsigned int len)
{
	if (len > iov->iov_len)
		len = iov->iov_len;
	if (unlikely(base > len))
		base = len;
	xdr->p = (__be32*)(iov->iov_base + base);
	xdr->end = (__be32*)(iov->iov_base + len);
	xdr->iov = iov;
	xdr->page_ptr = NULL;
	return len - base;
}

static unsigned int xdr_set_tail_base(struct xdr_stream *xdr,
				      unsigned int base, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;

	xdr_stream_set_pos(xdr, base + buf->page_len + buf->head->iov_len);
	return xdr_set_iov(xdr, buf->tail, base, len);
}

static unsigned int xdr_set_page_base(struct xdr_stream *xdr,
				      unsigned int base, unsigned int len)
{
	unsigned int pgnr;
	unsigned int maxlen;
	unsigned int pgoff;
	unsigned int pgend;
	void *kaddr;

	maxlen = xdr->buf->page_len;
	if (base >= maxlen)
		return 0;
	else
		maxlen -= base;
	if (len > maxlen)
		len = maxlen;

	xdr_stream_page_set_pos(xdr, base);
	base += xdr->buf->page_base;

	pgnr = base >> PAGE_SHIFT;
	xdr->page_ptr = &xdr->buf->pages[pgnr];
	kaddr = page_address(*xdr->page_ptr);

	pgoff = base & ~PAGE_MASK;
	xdr->p = (__be32*)(kaddr + pgoff);

	pgend = pgoff + len;
	if (pgend > PAGE_SIZE)
		pgend = PAGE_SIZE;
	xdr->end = (__be32*)(kaddr + pgend);
	xdr->iov = NULL;
	return len;
}

static void xdr_set_page(struct xdr_stream *xdr, unsigned int base,
			 unsigned int len)
{
	if (xdr_set_page_base(xdr, base, len) == 0) {
		base -= xdr->buf->page_len;
		xdr_set_tail_base(xdr, base, len);
	}
}

static void xdr_set_next_page(struct xdr_stream *xdr)
{
	unsigned int newbase;

	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
	newbase -= xdr->buf->page_base;
	if (newbase < xdr->buf->page_len)
		xdr_set_page_base(xdr, newbase, xdr_stream_remaining(xdr));
	else
		xdr_set_tail_base(xdr, 0, xdr_stream_remaining(xdr));
}

static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
	if (xdr->page_ptr != NULL)
		xdr_set_next_page(xdr);
	else if (xdr->iov == xdr->buf->head)
		xdr_set_page(xdr, 0, xdr_stream_remaining(xdr));
	return xdr->p != xdr->end;
}

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 * @rqst: pointer to controlling rpc_rqst, for debugging
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
		     struct rpc_rqst *rqst)
{
	xdr->buf = buf;
	xdr_reset_scratch_buffer(xdr);
	xdr->nwords = XDR_QUADLEN(buf->len);
	if (xdr_set_iov(xdr, buf->head, 0, buf->len) == 0 &&
	    xdr_set_page_base(xdr, 0, buf->len) == 0)
		xdr_set_iov(xdr, buf->tail, 0, buf->len);
	if (p != NULL && p > xdr->p && xdr->end >= p) {
		xdr->nwords -= p - xdr->p;
		xdr->p = p;
	}
	xdr->rqst = rqst;
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
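
/*
 * Editor's illustrative sketch, not part of the original file: decoding a
 * single 32-bit word from a flat buffer via a temporary xdr_buf. The
 * function name is hypothetical; real callers usually decode from an
 * rpc_rqst's rq_rcv_buf.
 */
static int __maybe_unused example_decode_u32(void *mem, size_t memlen, u32 *out)
{
	struct kvec iov = { .iov_base = mem, .iov_len = memlen };
	struct xdr_buf buf;
	struct xdr_stream xdr;
	__be32 *p;

	xdr_buf_from_iov(&iov, &buf);
	xdr_init_decode(&xdr, &buf, NULL, NULL);
	p = xdr_inline_decode(&xdr, sizeof(__be32));
	if (!p)
		return -EBADMSG;
	*out = be32_to_cpup(p);
	return 0;
}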

/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @pages: list of pages to decode into
 * @len: length in bytes of buffer in pages
 */
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
			   struct page **pages, unsigned int len)
{
	memset(buf, 0, sizeof(*buf));
	buf->pages = pages;
	buf->page_len = len;
	buf->buflen = len;
	buf->len = len;
	xdr_init_decode(xdr, buf, NULL, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);

static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	unsigned int nwords = XDR_QUADLEN(nbytes);
	__be32 *p = xdr->p;
	__be32 *q = p + nwords;

	if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->nwords -= nwords;
	return p;
}

static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;
	char *cpdest = xdr->scratch.iov_base;
	size_t cplen = (char *)xdr->end - (char *)xdr->p;

	if (nbytes > xdr->scratch.iov_len)
		goto out_overflow;
	p = __xdr_inline_decode(xdr, cplen);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, cplen);
	if (!xdr_set_next_buffer(xdr))
		goto out_overflow;
	cpdest += cplen;
	nbytes -= cplen;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, nbytes);
	return xdr->scratch.iov_base;
out_overflow:
	trace_rpc_xdr_overflow(xdr, nbytes);
	return NULL;
}

/**
 * xdr_inline_decode - Retrieve XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;

	if (unlikely(nbytes == 0))
		return xdr->p;
	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
		goto out_overflow;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p != NULL)
		return p;
	return xdr_copy_to_scratch(xdr, nbytes);
out_overflow:
	trace_rpc_xdr_overflow(xdr, nbytes);
	return NULL;
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
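
/*
 * Editor's illustrative sketch, not part of the original file: decoding a
 * counted opaque with two xdr_inline_decode() calls. The second call
 * advances past the XDR padding automatically, since the stream position
 * always moves in 4-byte units. Names and the @maxlen policy are
 * hypothetical.
 */
static int __maybe_unused example_decode_blob(struct xdr_stream *xdr,
					      u8 *out, u32 maxlen)
{
	__be32 *p;
	u32 len;

	p = xdr_inline_decode(xdr, sizeof(__be32));
	if (!p)
		return -EBADMSG;	/* ran off the end of the buffer */
	len = be32_to_cpup(p);
	if (len > maxlen)
		return -EMSGSIZE;
	p = xdr_inline_decode(xdr, len);
	if (!p)
		return -EBADMSG;
	memcpy(out, p, len);
	return 0;
}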

static void xdr_realign_pages(struct xdr_stream *xdr)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->head;
	unsigned int cur = xdr_stream_pos(xdr);
	unsigned int copied;

	/* Realign pages to current pointer position */
	if (iov->iov_len > cur) {
		copied = xdr_shrink_bufhead(buf, cur);
		trace_rpc_xdr_alignment(xdr, cur, copied);
		xdr_set_page(xdr, 0, buf->page_len);
	}
}

static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	unsigned int nwords = XDR_QUADLEN(len);
	unsigned int copied;

	if (xdr->nwords == 0)
		return 0;

	xdr_realign_pages(xdr);
	if (nwords > xdr->nwords) {
		nwords = xdr->nwords;
		len = nwords << 2;
	}
	if (buf->page_len <= len)
		len = buf->page_len;
	else if (nwords < xdr->nwords) {
		/* Truncate page data and move it into the tail */
		copied = xdr_shrink_pagelen(buf, len);
		trace_rpc_xdr_alignment(xdr, len, copied);
	}
	return len;
}

/**
 * xdr_read_pages - align page-based XDR data to current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + @len
 * bytes is moved into the XDR tail[]. The xdr_stream current position is
 * then advanced past that data to align to the next XDR object in the tail.
 *
 * Returns the number of XDR encoded bytes now contained in the pages
 */
unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	unsigned int nwords = XDR_QUADLEN(len);
	unsigned int base, end, pglen;

	pglen = xdr_align_pages(xdr, nwords << 2);
	if (pglen == 0)
		return 0;

	base = (nwords << 2) - pglen;
	end = xdr_stream_remaining(xdr) - pglen;

	xdr_set_tail_base(xdr, base, end);
	return len <= pglen ? len : pglen;
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
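
/*
 * Editor's illustrative sketch, not part of the original file: after the
 * count preceding a READ-style payload has been decoded, xdr_read_pages()
 * aligns that payload into buf->pages and leaves the stream positioned at
 * the next XDR item in the tail. Names are hypothetical.
 */
static int __maybe_unused example_align_payload(struct xdr_stream *xdr,
						unsigned int count)
{
	unsigned int received = xdr_read_pages(xdr, count);

	if (received < count)
		return -EBADMSG;	/* reply was truncated */
	/* page data is now in xdr->buf->pages[]; keep decoding the tail */
	return 0;
}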

/**
 * xdr_set_pagelen - Sets the length of the XDR pages
 * @xdr: pointer to xdr_stream struct
 * @len: new length of the XDR page data
 *
 * Either grows or shrinks the length of the xdr pages by setting pagelen to
 * @len bytes. When shrinking, any extra data is moved into buf->tail, whereas
 * when growing any data beyond the current pointer is moved into the tail.
 */
void xdr_set_pagelen(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	size_t remaining = xdr_stream_remaining(xdr);
	size_t base = 0;

	if (len < buf->page_len) {
		base = buf->page_len - len;
		xdr_shrink_pagelen(buf, len);
	} else {
		xdr_buf_head_shift_right(buf, xdr_stream_pos(xdr),
					 buf->page_len, remaining);
		if (len > buf->page_len)
			xdr_buf_try_expand(buf, len - buf->page_len);
	}
	xdr_set_tail_base(xdr, base, remaining);
}
EXPORT_SYMBOL_GPL(xdr_set_pagelen);

/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	len = xdr_align_pages(xdr, len);
	/*
	 * Position current pointer at beginning of the page data, and
	 * set remaining message length.
	 */
	if (len != 0)
		xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);

static const struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void xdr_buf_from_iov(const struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);

/**
 * xdr_buf_subsegment - set subbuf to a portion of buf
 * @buf: an xdr buffer
 * @subbuf: the result buffer
 * @base: beginning of range in bytes
 * @len: length of range in bytes
 *
 * sets @subbuf to an xdr buffer representing the portion of @buf of
 * length @len starting at offset @base.
 *
 * @buf and @subbuf may be pointers to the same struct xdr_buf.
 *
 * Returns -1 if base or length are out of bounds.
 */
int xdr_buf_subsegment(const struct xdr_buf *buf, struct xdr_buf *subbuf,
		       unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		base -= buf->head[0].iov_len;
		subbuf->head[0].iov_base = buf->head[0].iov_base;
		subbuf->head[0].iov_len = 0;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->pages = buf->pages;
		subbuf->page_base = 0;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		base -= buf->tail[0].iov_len;
		subbuf->tail[0].iov_base = buf->tail[0].iov_base;
		subbuf->tail[0].iov_len = 0;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
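
/*
 * Editor's illustrative sketch, not part of the original file: carving a
 * byte range out of an xdr_buf without copying it. The subsegment shares
 * the parent's head/pages/tail, so it can be handed to helpers such as
 * read_bytes_from_xdr_buf() further below. Names are hypothetical.
 */
static int __maybe_unused example_subrange(const struct xdr_buf *buf,
					   unsigned int base, unsigned int len)
{
	struct xdr_buf sub;

	if (xdr_buf_subsegment(buf, &sub, base, len))
		return -ERANGE;		/* range falls outside @buf */
	/* @sub now aliases bytes [base, base + len) of @buf */
	return 0;
}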

/**
 * xdr_stream_subsegment - set @subbuf to a portion of @xdr
 * @xdr: an xdr_stream set up for decoding
 * @subbuf: the result buffer
 * @nbytes: length of @xdr to extract, in bytes
 *
 * Sets up @subbuf to represent a portion of @xdr. The portion
 * starts at the current offset in @xdr, and extends for a length
 * of @nbytes. If this is successful, @xdr is advanced to the next
 * XDR data item following that portion.
 *
 * Return values:
 *   %true: @subbuf has been initialized, and @xdr has been advanced.
 *   %false: a bounds error has occurred
 */
bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf,
			   unsigned int nbytes)
{
	unsigned int start = xdr_stream_pos(xdr);
	unsigned int remaining, len;

	/* Extract @subbuf and bounds-check the fn arguments */
	if (xdr_buf_subsegment(xdr->buf, subbuf, start, nbytes))
		return false;

	/* Advance @xdr by @nbytes */
	for (remaining = nbytes; remaining;) {
		if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
			return false;

		len = (char *)xdr->end - (char *)xdr->p;
		if (remaining <= len) {
			xdr->p = (__be32 *)((char *)xdr->p +
					(remaining + xdr_pad_size(nbytes)));
			break;
		}

		xdr->p = (__be32 *)((char *)xdr->p + len);
		xdr->end = xdr->p;
		remaining -= len;
	}

	xdr_stream_set_pos(xdr, start + nbytes);
	return true;
}
EXPORT_SYMBOL_GPL(xdr_stream_subsegment);

/**
 * xdr_stream_move_subsegment - Move part of a stream to another position
 * @xdr: the source xdr_stream
 * @offset: the source offset of the segment
 * @target: the target offset of the segment
 * @length: the number of bytes to move
 *
 * Moves @length bytes from @offset to @target in the xdr_stream, overwriting
 * anything in its space. Returns the number of bytes in the segment.
 */
unsigned int xdr_stream_move_subsegment(struct xdr_stream *xdr, unsigned int offset,
					unsigned int target, unsigned int length)
{
	struct xdr_buf buf;
	unsigned int shift;

	if (offset < target) {
		shift = target - offset;
		if (xdr_buf_subsegment(xdr->buf, &buf, offset, shift + length) < 0)
			return 0;
		xdr_buf_head_shift_right(&buf, 0, length, shift);
	} else if (offset > target) {
		shift = offset - target;
		if (xdr_buf_subsegment(xdr->buf, &buf, target, shift + length) < 0)
			return 0;
		xdr_buf_head_shift_left(&buf, shift, length, shift);
	}
	return length;
}
EXPORT_SYMBOL_GPL(xdr_stream_move_subsegment);

/**
 * xdr_stream_zero - zero out a portion of an xdr_stream
 * @xdr: an xdr_stream to zero out
 * @offset: the starting point in the stream
 * @length: the number of bytes to zero
 */
unsigned int xdr_stream_zero(struct xdr_stream *xdr, unsigned int offset,
			     unsigned int length)
{
	struct xdr_buf buf;

	if (xdr_buf_subsegment(xdr->buf, &buf, offset, length) < 0)
		return 0;
	if (buf.head[0].iov_len)
		xdr_buf_iov_zero(buf.head, 0, buf.head[0].iov_len);
	if (buf.page_len > 0)
		xdr_buf_pages_zero(&buf, 0, buf.page_len);
	if (buf.tail[0].iov_len)
		xdr_buf_iov_zero(buf.tail, 0, buf.tail[0].iov_len);
	return length;
}
EXPORT_SYMBOL_GPL(xdr_stream_zero);

/**
 * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
 * @buf: buf to be trimmed
 * @len: number of bytes to reduce "buf" by
 *
 * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
 * that it's possible that we'll trim less than that amount if the xdr_buf is
 * too small, or if (for instance) it's all in the head and the parser has
 * already read too far into it.
 */
void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
{
	size_t cur;
	unsigned int trim = len;

	if (buf->tail[0].iov_len) {
		cur = min_t(size_t, buf->tail[0].iov_len, trim);
		buf->tail[0].iov_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->page_len) {
		cur = min_t(unsigned int, buf->page_len, trim);
		buf->page_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->head[0].iov_len) {
		cur = min_t(size_t, buf->head[0].iov_len, trim);
		buf->head[0].iov_len -= cur;
		trim -= cur;
	}
fix_len:
	buf->len -= (len - trim);
}
EXPORT_SYMBOL_GPL(xdr_buf_trim);

static void __read_bytes_from_xdr_buf(const struct xdr_buf *subbuf,
				      void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(const struct xdr_buf *buf, unsigned int base,
			    void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);

static void __write_bytes_to_xdr_buf(const struct xdr_buf *subbuf,
				     void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(const struct xdr_buf *buf, unsigned int base,
			   void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);

int xdr_decode_word(const struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32 raw;
	int status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = be32_to_cpu(raw);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);

int xdr_encode_word(const struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32 raw = cpu_to_be32(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);

/* Returns 0 on success, or else a negative error code. */
static int xdr_xcode_array2(const struct xdr_buf *buf, unsigned int base,
			    struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_SHIFT);
		base &= ~PAGE_MASK;
		avail_page = min_t(unsigned int, PAGE_SIZE - base,
				   avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
					 (unsigned int) PAGE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int xdr_decode_array2(const struct xdr_buf *buf, unsigned int base,
		      struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int xdr_encode_array2(const struct xdr_buf *buf, unsigned int base,
		      struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);

int xdr_process_buf(const struct xdr_buf *buf, unsigned int offset,
		    unsigned int len,
		    int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_SHIFT;
		thislen = PAGE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
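
/*
 * Editor's illustrative sketch, not part of the original file: an "actor"
 * for xdr_process_buf() that simply sums the length of every scatterlist
 * segment it is handed, much as the GSS code walks a buffer to feed each
 * segment into a crypto operation. Names are hypothetical.
 */
static int example_count_actor(struct scatterlist *sg, void *data)
{
	unsigned int *total = data;

	*total += sg->length;
	return 0;			/* non-zero would abort the walk */
}

static int __maybe_unused example_count_bytes(const struct xdr_buf *buf)
{
	unsigned int total = 0;
	int err;

	err = xdr_process_buf(buf, 0, buf->len, example_count_actor, &total);
	return err ? err : (int)total;
}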

/**
 * xdr_stream_decode_opaque - Decode variable length opaque
 * @xdr: pointer to xdr_stream
 * @ptr: location to store opaque data
 * @size: size of storage buffer @ptr
 *
 * Return values:
 *   On success, returns size of object stored in *@ptr
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE on overflow of storage buffer @ptr
 */
ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, size_t size)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
	if (ret <= 0)
		return ret;
	memcpy(ptr, p, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque);

/**
 * xdr_stream_decode_opaque_dup - Decode and duplicate variable length opaque
 * @xdr: pointer to xdr_stream
 * @ptr: location to store pointer to opaque data
 * @maxlen: maximum acceptable object size
 * @gfp_flags: GFP mask to use
 *
 * Return values:
 *   On success, returns size of object stored in *@ptr
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE if the size of the object would exceed @maxlen
 *   %-ENOMEM on memory allocation failure
 */
ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr,
				     size_t maxlen, gfp_t gfp_flags)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
	if (ret > 0) {
		*ptr = kmemdup(p, ret, gfp_flags);
		if (*ptr != NULL)
			return ret;
		ret = -ENOMEM;
	}
	*ptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_dup);

/**
 * xdr_stream_decode_string - Decode variable length string
 * @xdr: pointer to xdr_stream
 * @str: location to store string
 * @size: size of storage buffer @str
 *
 * Return values:
 *   On success, returns length of NUL-terminated string stored in *@str
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE on overflow of storage buffer @str
 */
ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, size_t size)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
	if (ret > 0) {
		memcpy(str, p, ret);
		str[ret] = '\0';
		return strlen(str);
	}
	*str = '\0';
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_string);

/**
 * xdr_stream_decode_string_dup - Decode and duplicate variable length string
 * @xdr: pointer to xdr_stream
 * @str: location to store pointer to string
 * @maxlen: maximum acceptable string length
 * @gfp_flags: GFP mask to use
 *
 * Return values:
 *   On success, returns length of NUL-terminated string stored in *@str
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE if the size of the string would exceed @maxlen
 *   %-ENOMEM on memory allocation failure
 */
ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
				     size_t maxlen, gfp_t gfp_flags)
{
	void *p;
	ssize_t ret;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
	if (ret > 0) {
		char *s = kmemdup_nul(p, ret, gfp_flags);

		if (s != NULL) {
			*str = s;
			return strlen(s);
		}
		ret = -ENOMEM;
	}
	*str = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup);
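
/*
 * Editor's illustrative sketch, not part of the original file: a typical
 * xdr_stream_decode_string_dup() caller, here duplicating a name of at
 * most 255 bytes. The function name and length limit are hypothetical.
 */
static int __maybe_unused example_decode_name(struct xdr_stream *xdr,
					      char **name)
{
	ssize_t len;

	len = xdr_stream_decode_string_dup(xdr, name, 255, GFP_KERNEL);
	if (len < 0)
		return (int)len;	/* -EBADMSG, -EMSGSIZE or -ENOMEM */
	return 0;
}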