xref: /openbmc/linux/net/sunrpc/xdr.c (revision d5cb9783536a41df9f9cba5b0a1d78047ed787f7)
/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
u32 *
xdr_encode_netobj(u32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = htonl(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + quadlen;
}

u32 *
xdr_decode_netobj(u32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len  = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}

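/*
 * Illustrative sketch (not part of the original file): a netobj is encoded
 * as a 32-bit length word followed by the data, zero-padded to the next
 * 32-bit boundary.  For obj->len == 5, three words are consumed:
 *
 *	word 0:  00 00 00 05            length
 *	word 1:  'h' 'e' 'l' 'l'        data
 *	word 2:  'o' 00  00  00         data + zero padding
 *
 *	struct xdr_netobj obj = { .len = 5, .data = (u8 *) "hello" };
 *	u32 scratch[3];
 *	u32 *next = xdr_encode_netobj(scratch, &obj);
 *
 * Here next == scratch + 3, positioned for the next XDR item.
 */
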
/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 *
 */
u32 *xdr_encode_opaque_fixed(u32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL(xdr_encode_opaque_fixed);

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
u32 *xdr_encode_opaque(u32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = htonl(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL(xdr_encode_opaque);

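/*
 * Worked example (added commentary): for nbytes == 5, XDR_QUADLEN(5) == 2,
 * so quadlen << 2 == 8 and padding == 3.  xdr_encode_opaque() therefore
 * emits one length word plus two data words, the last carrying three zero
 * pad bytes:
 *
 *	u32 buf[3];
 *	u32 *next = xdr_encode_opaque(buf, "hello", 5);
 *
 * Here next == buf + 3.
 */
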
u32 *
xdr_encode_string(u32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}

u32 *
xdr_decode_string(u32 *p, char **sp, int *lenp, int maxlen)
{
	unsigned int	len;
	char		*string;

	if ((len = ntohl(*p++)) > maxlen)
		return NULL;
	if (lenp)
		*lenp = len;
	if ((len % 4) != 0) {
		/* The XDR pad bytes leave room for the trailing '\0' */
		string = (char *) p;
	} else {
		/* No pad bytes: shift the string down over the length
		 * word to make room for the trailing '\0' */
		string = (char *) (p - 1);
		memmove(string, p, len);
	}
	string[len] = '\0';
	*sp = string;
	return p + XDR_QUADLEN(len);
}

u32 *
xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}

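/*
 * Illustrative sketch (not part of the original file): decoding a string
 * without copying.  NFS_MAXNAMLEN is just a typical caller-supplied bound
 * here; note that this variant does not NUL-terminate the string:
 *
 *	char *name;
 *	int namelen;
 *
 *	p = xdr_decode_string_inplace(p, &name, &namelen, NFS_MAXNAMLEN);
 *	if (p == NULL)
 *		return -EIO;
 *
 * A NULL return means the on-wire length exceeded the given maximum.
 */
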
void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len  = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len  = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}

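/*
 * For reference (added commentary): a struct xdr_buf describes an XDR
 * message split across three regions that are consumed in order:
 *
 *	head[0]:  kvec holding the first part of the message
 *	pages[]:  an optional page-based data payload
 *	tail[0]:  kvec holding anything that follows the page data
 *
 * xdr_encode_pages() appends a page payload after an existing head and
 * accounts for XDR padding in the tail; xdr_inline_pages() instead splits
 * an existing head at 'offset' and slots the pages in between.
 */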
176 
/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			flush_dcache_page(*pgto);
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto, KM_USER0);
		vfrom = kmap_atomic(*pgfrom, KM_USER1);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		kunmap_atomic(vfrom, KM_USER1);
		kunmap_atomic(vto, KM_USER0);

	} while ((len -= copy) != 0);
	flush_dcache_page(*pgto);
}

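/*
 * Worked example (added commentary) of the page-vector addressing used
 * above, assuming PAGE_CACHE_SIZE == 4096: the byte at address 5000 lives
 * in pages[5000 >> PAGE_CACHE_SHIFT] == pages[1], at offset
 * (5000 & ~PAGE_CACHE_MASK) == 904 within that page.  The loop above
 * walks such addresses backwards so that overlapping ranges shift safely,
 * just as memmove() does for linear buffers.
 */
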
/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto, KM_USER0);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;

	} while ((len -= copy) != 0);
	flush_dcache_page(*pgto);
}

/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom, KM_USER0);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}

/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON(len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	char *p;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	BUG_ON(len > pglen);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		p = (char *)tail->iov_base + len;
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove(p, tail->iov_base, copy);
		} else
			buf->buflen -= len;
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > tail->iov_len)
			copy = tail->iov_len;
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (uint32_t *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (uint32_t *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL(xdr_init_encode);

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
uint32_t *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;
	uint32_t *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL(xdr_reserve_space);

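/*
 * Typical encode-side usage (illustrative sketch; the request type, field
 * names and error handling are hypothetical, not taken from this file):
 *
 *	static int encode_foo(struct rpc_rqst *req, u32 *p, struct foo *foo)
 *	{
 *		struct xdr_stream xdr;
 *
 *		xdr_init_encode(&xdr, &req->rq_snd_buf, p);
 *		p = xdr_reserve_space(&xdr, 8);
 *		if (p == NULL)
 *			return -EMSGSIZE;
 *		*p++ = htonl(foo->fileid);
 *		*p++ = htonl(foo->mode);
 *		return 0;
 *	}
 *
 * xdr_reserve_space() returns NULL once the scratch buffer is exhausted.
 */
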
/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len  = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len  += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL(xdr_write_pages);

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
	struct kvec *iov = buf->head;
	unsigned int len = iov->iov_len;

	if (len > buf->len)
		len = buf->len;
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = p;
	xdr->end = (uint32_t *)((char *)iov->iov_base + len);
}
EXPORT_SYMBOL(xdr_init_decode);

/**
 * xdr_inline_decode - Retrieve non-page XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
uint32_t *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;
	uint32_t *q = p + XDR_QUADLEN(nbytes);

	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	return p;
}
EXPORT_SYMBOL(xdr_inline_decode);

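/*
 * Typical decode-side usage (illustrative sketch; the reply type and field
 * names are hypothetical, not taken from this file):
 *
 *	static int decode_foo(struct rpc_rqst *req, u32 *p, struct foo *foo)
 *	{
 *		struct xdr_stream xdr;
 *
 *		xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
 *		p = xdr_inline_decode(&xdr, 8);
 *		if (p == NULL)
 *			return -EIO;
 *		foo->fileid = ntohl(*p++);
 *		foo->mode = ntohl(*p++);
 *		return 0;
 *	}
 *
 * A NULL return from xdr_inline_decode() means the reply was shorter than
 * expected.
 */
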
/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the XDR tail.
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	ssize_t shift;
	unsigned int end;
	int padding;

	/* Realign pages to current pointer position */
	iov  = buf->head;
	shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
	if (shift > 0)
		xdr_shrink_bufhead(buf, shift);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length.  */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (uint32_t *)((char *)iov->iov_base + padding);
	xdr->end = (uint32_t *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL(xdr_read_pages);

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}

/* Sets subiov to the intersection of iov with the buffer of length len
 * starting base bytes after iov.  Indicates an empty intersection by
 * setting the length of subiov to zero.  Decrements len by the length of
 * subiov, and sets base to zero (or decrements it by the length of iov if
 * subiov is empty). */
static void
iov_subsegment(struct kvec *iov, struct kvec *subiov, int *base, int *len)
{
	if (*base > iov->iov_len) {
		subiov->iov_base = NULL;
		subiov->iov_len = 0;
		*base -= iov->iov_len;
	} else {
		subiov->iov_base = iov->iov_base + *base;
		subiov->iov_len = min(*len, (int)iov->iov_len - *base);
		*base = 0;
	}
	*len -= subiov->iov_len;
}

/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or len are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			int base, int len)
{
	int i;

	subbuf->buflen = subbuf->len = len;
	iov_subsegment(buf->head, subbuf->head, &base, &len);

	if (base < buf->page_len) {
		i = (base + buf->page_base) >> PAGE_CACHE_SHIFT;
		subbuf->pages = &buf->pages[i];
		subbuf->page_base = (base + buf->page_base) & ~PAGE_CACHE_MASK;
		subbuf->page_len = min((int)buf->page_len - base, len);
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	iov_subsegment(buf->tail, subbuf->tail, &base, &len);
	if (base || len)
		return -1;
	return 0;
}

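/*
 * Illustrative sketch (not part of the original file; the caller's error
 * mapping is hypothetical): carving out the 16 bytes that start 20 bytes
 * into a buffer, wherever they live (head, pages or tail).  No data is
 * copied; subbuf aliases buf:
 *
 *	struct xdr_buf subbuf;
 *
 *	if (xdr_buf_subsegment(buf, &subbuf, 20, 16))
 *		return -EINVAL;
 *
 * xdr_buf_subsegment() returns -1 when the requested range extends past
 * the end of buf.
 */
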
/* obj is assumed to point to allocated memory of size at least len: */
int
read_bytes_from_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
{
	struct xdr_buf subbuf;
	int this_len;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status)
		goto out;
	this_len = min(len, (int)subbuf.head[0].iov_len);
	memcpy(obj, subbuf.head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf.pages, subbuf.page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.tail[0].iov_len);
	memcpy(obj, subbuf.tail[0].iov_base, this_len);
out:
	return status;
}

/* obj is assumed to point to allocated memory of size at least len: */
int
write_bytes_to_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
{
	struct xdr_buf subbuf;
	int this_len;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status)
		goto out;
	this_len = min(len, (int)subbuf.head[0].iov_len);
	memcpy(subbuf.head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.page_len);
	if (this_len)
		_copy_to_pages(subbuf.pages, subbuf.page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.tail[0].iov_len);
	memcpy(subbuf.tail[0].iov_base, obj, this_len);
out:
	return status;
}

int
xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj)
{
	u32	raw;
	int	status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = ntohl(raw);
	return 0;
}

int
xdr_encode_word(struct xdr_buf *buf, int base, u32 obj)
{
	u32	raw = htonl(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}

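/*
 * Worked example (added commentary): the two helpers above round-trip a
 * single 32-bit word at an arbitrary byte offset, regardless of whether
 * that offset falls in the head, the pages or the tail:
 *
 *	u32 count;
 *
 *	if (xdr_encode_word(buf, 12, 42))
 *		goto bad;
 *	if (xdr_decode_word(buf, 12, &count))
 *		goto bad;
 *	BUG_ON(count != 42);
 */
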
/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set obj to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int
xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, int offset)
{
	u32	tail_offset = buf->head[0].iov_len + buf->page_len;
	u32	obj_end_offset;

	if (xdr_decode_word(buf, offset, &obj->len))
		goto out;
	obj_end_offset = offset + 4 + obj->len;

	if (obj_end_offset <= buf->head[0].iov_len) {
		/* The obj is contained entirely in the head: */
		obj->data = buf->head[0].iov_base + offset + 4;
	} else if (offset + 4 >= tail_offset) {
		if (obj_end_offset - tail_offset
				> buf->tail[0].iov_len)
			goto out;
		/* The obj is contained entirely in the tail: */
		obj->data = buf->tail[0].iov_base
			+ offset - tail_offset + 4;
	} else {
		/* use end of tail as storage for obj:
		 * (We don't copy to the beginning because then we'd have
		 * to worry about doing a potentially overlapping copy.
		 * This assumes the object is at most half the length of the
		 * tail.) */
		if (obj->len > buf->tail[0].iov_len)
			goto out;
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len -
				obj->len;
		if (read_bytes_from_xdr_buf(buf, offset + 4,
					obj->data, obj->len))
			goto out;
	}
	return 0;
out:
	return -1;
}

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					    desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
				 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);	/* kfree(NULL) is a no-op */
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
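
/*
 * Illustrative sketch (not part of the original file; the element type,
 * callback and maximum are hypothetical): decoding a counted array of
 * fixed-size elements.  The xcode callback is invoked once per element
 * with desc->elem_size bytes of contiguous element data:
 *
 *	static int foo_xcode(struct xdr_array2_desc *desc, void *elem)
 *	{
 *		return process_foo((struct foo_elem *) elem);
 *	}
 *
 *	struct xdr_array2_desc desc = {
 *		.elem_size = sizeof(struct foo_elem),
 *		.array_maxlen = FOO_MAX,
 *		.xcode = foo_xcode,
 *	};
 *
 *	err = xdr_decode_array2(buf, base, &desc);
 */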