xref: /openbmc/linux/net/sunrpc/xdr.c (revision 643d1f7f)
/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = htonl(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL(xdr_encode_netobj);
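
/*
 * Illustrative sketch, not part of the original file: on the wire a
 * netobj is a 32-bit length word followed by the data, zero-padded to
 * the next 4-byte boundary.  The names below are hypothetical.
 *
 *	struct xdr_netobj obj = { .len = 5, .data = (u8 *)"abcde" };
 *	__be32 wire[3];
 *	xdr_encode_netobj(wire, &obj);
 *	// wire bytes: 00 00 00 05  'a' 'b' 'c' 'd'  'e' 00 00 00
 */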

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len  = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL(xdr_decode_netobj);

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL(xdr_encode_opaque_fixed);

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = htonl(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL(xdr_encode_opaque);
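
/*
 * Illustrative sketch, not part of the original file: the padding
 * computed above is (XDR_QUADLEN(nbytes) << 2) - nbytes, i.e. the
 * distance to the next 4-byte boundary.  For example:
 *
 *	nbytes = 6: quadlen = 2, padding = 8 - 6 = 2 zero bytes
 *	nbytes = 8: quadlen = 2, padding = 8 - 8 = 0 zero bytes
 *
 *	__be32 *p = buf;			// hypothetical buffer
 *	p = xdr_encode_opaque(p, "ab", 2);	// 00 00 00 02 'a' 'b' 00 00
 */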

__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL(xdr_encode_string);

__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len;

	len = ntohl(*p++);
	if (len > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL(xdr_decode_string_inplace);
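
/*
 * Illustrative sketch, not part of the original file: the string is
 * decoded in place, so *sp points into the XDR buffer itself and is
 * not guaranteed to be NUL-terminated; callers must use the returned
 * length.  The names below are hypothetical.
 *
 *	char *name;
 *	unsigned int namelen;
 *	p = xdr_decode_string_inplace(p, &name, &namelen, NFS_MAXNAMLEN);
 *	if (p == NULL)
 *		return -EIO;	// string longer than maxlen
 */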

void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len  = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}
EXPORT_SYMBOL(xdr_encode_pages);
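
/*
 * Illustrative sketch, not part of the original file: when page_len
 * is not a multiple of 4, the zero word is written just past the head
 * and the tail kvec is aimed at its last (4 - len % 4) bytes, so on
 * the wire the pad bytes appear immediately after the page data.
 * E.g. for len = 5 the transmitted layout is:
 *
 *	head | 5 bytes of page data | 3 zero pad bytes from the tail
 */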

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len  = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
EXPORT_SYMBOL(xdr_inline_pages);

/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto, KM_USER0);
		vfrom = kmap_atomic(*pgfrom, KM_USER1);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vfrom, KM_USER1);
		kunmap_atomic(vto, KM_USER0);

	} while ((len -= copy) != 0);
}
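
/*
 * Illustrative sketch, not part of the original file: with the
 * addressing convention above, a "page vector address" decomposes as
 *
 *	page index  = addr >> PAGE_CACHE_SHIFT
 *	page offset = addr & ~PAGE_CACHE_MASK
 *
 * e.g. with 4K pages, address 9000 means byte 808 of pages[2]
 * (9000 = 2 * 4096 + 808).
 */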

/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto, KM_USER0);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;

	} while ((len -= copy) != 0);
	flush_dcache_page(*pgto);
}

/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom, KM_USER0);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}

/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON(len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	char *p;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	BUG_ON(len > pglen);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		p = (char *)tail->iov_base + len;
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove(p, tail->iov_base, copy);
		} else
			buf->buflen -= len;
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > tail->iov_len)
			copy = tail->iov_len;
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL(xdr_shift_buf);
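
/*
 * Illustrative sketch, not part of the original file: shifting by
 * 'len' shortens head[0] from its end; the displaced head bytes are
 * pushed into the front of the page data, whose own last bytes are in
 * turn pushed into the tail, so no message data is lost:
 *
 *	head: [ H H H H h h ]  pages: [ P P P P ]  tail: [ . . ]
 *		shift by 2
 *	head: [ H H H H ]      pages: [ h h P P ]  tail: [ P P ]
 */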

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL(xdr_init_encode);

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL(xdr_reserve_space);
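
/*
 * Illustrative sketch, not part of the original file: a typical
 * encoder reserves space and then fills it in.  The argument names
 * and error handling below are hypothetical.
 *
 *	__be32 *p = xdr_reserve_space(&xdr, 8);
 *	if (p == NULL)
 *		return -EMSGSIZE;	// would run past xdr->end
 *	*p++ = htonl(argp->fileid);
 *	*p++ = htonl(argp->count);
 */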

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len  = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len  += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL(xdr_write_pages);

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	unsigned int len = iov->iov_len;

	if (len > buf->len)
		len = buf->len;
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = p;
	xdr->end = (__be32 *)((char *)iov->iov_base + len);
}
EXPORT_SYMBOL(xdr_init_decode);

/**
 * xdr_inline_decode - Retrieve non-page XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q = p + XDR_QUADLEN(nbytes);

	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	return p;
}
EXPORT_SYMBOL(xdr_inline_decode);
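
/*
 * Illustrative sketch, not part of the original file: decoding pairs
 * xdr_init_decode() with xdr_inline_decode(), checking for NULL on
 * every read since the reply may be shorter than expected.  The
 * names below are hypothetical.
 *
 *	struct xdr_stream xdr;
 *	__be32 *p;
 *
 *	xdr_init_decode(&xdr, rcvbuf, rcvbuf->head[0].iov_base);
 *	p = xdr_inline_decode(&xdr, 8);
 *	if (p == NULL)
 *		return -EIO;	// truncated reply
 *	count = ntohl(*p++);
 *	eof   = ntohl(*p);
 */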

/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	ssize_t shift;
	unsigned int end;
	int padding;

	/* Realign pages to current pointer position */
	iov  = buf->head;
	shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
	if (shift > 0)
		xdr_shrink_bufhead(buf, shift);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length.  */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL(xdr_read_pages);
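
/*
 * Illustrative sketch, not part of the original file: an NFS
 * READ-style decoder would pull the fixed reply fields out of the
 * head, then call xdr_read_pages() with the payload length so the
 * data lines up with the page vector and the stream continues in
 * the tail.
 *
 *	count = ntohl(*p);		// payload length from the reply
 *	xdr_read_pages(&xdr, count);	// align pages, point xdr at tail
 */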

/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	char *kaddr = page_address(xdr->buf->pages[0]);
	xdr_read_pages(xdr, len);
	/*
	 * Position the current pointer at the beginning of the page
	 * data, and limit decoding to what fits in the first page.
	 */
	if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
		len = PAGE_CACHE_SIZE - xdr->buf->page_base;
	xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
	xdr->end = (__be32 *)((char *)xdr->p + len);
}
EXPORT_SYMBOL(xdr_enter_page);

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL(xdr_buf_from_iov);

/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length is out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		subbuf->head[0].iov_base = NULL;
		subbuf->head[0].iov_len = 0;
		base -= buf->head[0].iov_len;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_CACHE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		subbuf->tail[0].iov_base = NULL;
		subbuf->tail[0].iov_len = 0;
		base -= buf->tail[0].iov_len;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL(xdr_buf_subsegment);
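
/*
 * Illustrative sketch, not part of the original file: carving out a
 * sub-buffer never copies data; subbuf's head, pages and tail simply
 * point into buf.  E.g. with head[0].iov_len = 100 and page_len =
 * 4096:
 *
 *	struct xdr_buf sub;
 *	if (xdr_buf_subsegment(buf, &sub, 80, 200))
 *		return -EINVAL;		// out of bounds
 *	// sub.head covers buf bytes 80..99, sub.pages the next 180
 */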

static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL(read_bytes_from_xdr_buf);

static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}

int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32	raw;
	int	status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = ntohl(raw);
	return 0;
}
EXPORT_SYMBOL(xdr_decode_word);

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32	raw = htonl(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL(xdr_encode_word);
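
/*
 * Illustrative sketch, not part of the original file: these helpers
 * read and write one 32-bit word at an arbitrary byte offset, taking
 * care of network byte order and of words that straddle the head,
 * pages and tail.
 *
 *	u32 val;
 *	if (xdr_encode_word(buf, 12, 42))	// write 42 at offset 12
 *		return -EINVAL;
 *	if (xdr_decode_word(buf, 12, &val))	// reads back val == 42
 *		return -EINVAL;
 */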

/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * tail.) */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL(xdr_buf_read_netobj);

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					    desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
				 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL(xdr_decode_array2);

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL(xdr_encode_array2);
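
/*
 * Illustrative sketch, not part of the original file: callers drive
 * xdr_{en,de}code_array2() with a descriptor whose xcode callback
 * converts one fixed-size element at a time.  The callback, MAX_ID
 * and the other names below are hypothetical.
 *
 *	static int decode_one_id(struct xdr_array2_desc *desc, void *elem)
 *	{
 *		u32 id = ntohl(*(__be32 *)elem);
 *		return id < MAX_ID ? 0 : -EINVAL;
 *	}
 *
 *	struct xdr_array2_desc desc = {
 *		.elem_size    = 4,
 *		.array_maxlen = 16,
 *		.xcode        = decode_one_id,
 *	};
 *	err = xdr_decode_array2(buf, base, &desc);
 */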

int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist	sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
		thislen = PAGE_CACHE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_CACHE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL(xdr_process_buf);
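
/*
 * Illustrative sketch, not part of the original file: the actor is
 * invoked once per contiguous segment and typically feeds a hash via
 * the crypto API of this era; the checksum helper and 'desc' below
 * are hypothetical.
 *
 *	static int checksum_actor(struct scatterlist *sg, void *data)
 *	{
 *		return crypto_hash_update(data, sg, sg->length);
 *	}
 *
 *	err = xdr_process_buf(buf, 0, buf->len, checksum_actor, &desc);
 */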