// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/bvec.h>
#include <trace/events/sunrpc.h>

static void _copy_to_pages(struct page **, size_t, const char *, size_t);


/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = cpu_to_be32(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int len;

	if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len  = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 *
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
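/*
 * Illustrative sketch (not part of the original file): encoding a 5-byte
 * cookie with xdr_encode_opaque_fixed() consumes XDR_QUADLEN(5) = 2 XDR
 * words, i.e. 8 bytes, of which the last 3 are zero padding. The
 * "encode_cookie" helper below is hypothetical:
 *
 *	static __be32 *encode_cookie(__be32 *p, const u8 cookie[5])
 *	{
 *		return xdr_encode_opaque_fixed(p, cookie, 5);
 *	}
 */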
/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = cpu_to_be32(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);

__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);

__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len;

	len = be32_to_cpu(*p++);
	if (len > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);

/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 *
 */
void
xdr_terminate_string(struct xdr_buf *buf, const u32 len)
{
	char *kaddr;

	kaddr = kmap_atomic(buf->pages[0]);
	kaddr[buf->page_base + len] = '\0';
	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);

size_t
xdr_buf_pagecount(struct xdr_buf *buf)
{
	if (!buf->page_len)
		return 0;
	return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int
xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp)
{
	size_t i, n = xdr_buf_pagecount(buf);

	if (n != 0 && buf->bvec == NULL) {
		buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
		if (!buf->bvec)
			return -ENOMEM;
		for (i = 0; i < n; i++) {
			buf->bvec[i].bv_page = buf->pages[i];
			buf->bvec[i].bv_len = PAGE_SIZE;
			buf->bvec[i].bv_offset = 0;
		}
	}
	return 0;
}

void
xdr_free_bvec(struct xdr_buf *buf)
{
	kfree(buf->bvec);
	buf->bvec = NULL;
}

/**
 * xdr_inline_pages - Prepare receive buffer for a large reply
 * @xdr: xdr_buf into which reply will be placed
 * @offset: expected offset where data payload will start, in bytes
 * @pages: vector of struct page pointers
 * @base: offset in first page where receive should start, in bytes
 * @len: expected size of the upper layer data payload, in bytes
 *
 */
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len  = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;
	if ((xdr->page_len & 3) == 0)
		tail->iov_len -= sizeof(__be32);

	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);
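/*
 * Illustrative sketch (not part of the original file): a transport that
 * wants to pass buf->pages to an iov_iter would typically bracket the
 * I/O with xdr_alloc_bvec() and xdr_free_bvec(). "xprt_send_pages" is a
 * hypothetical caller:
 *
 *	static int xprt_send_pages(struct xdr_buf *buf)
 *	{
 *		int err = xdr_alloc_bvec(buf, GFP_KERNEL);
 *
 *		if (err)
 *			return err;
 *		(hand buf->bvec and xdr_buf_pagecount(buf) to the transport)
 *		xdr_free_bvec(buf);
 *		return 0;
 *	}
 */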
/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 */

/**
 * _shift_data_left_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_SHIFT) + base
 * Also note: pgto_base must be < pgfrom_base, but the memory areas
 *	      they point to may overlap.
 */
static void
_shift_data_left_pages(struct page **pages, size_t pgto_base,
		       size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgfrom_base <= pgto_base);

	pgto = pages + (pgto_base >> PAGE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);

	pgto_base &= ~PAGE_MASK;
	pgfrom_base &= ~PAGE_MASK;

	do {
		if (pgto_base >= PAGE_SIZE) {
			pgto_base = 0;
			pgto++;
		}
		if (pgfrom_base >= PAGE_SIZE) {
			pgfrom_base = 0;
			pgfrom++;
		}

		copy = len;
		if (copy > (PAGE_SIZE - pgto_base))
			copy = PAGE_SIZE - pgto_base;
		if (copy > (PAGE_SIZE - pgfrom_base))
			copy = PAGE_SIZE - pgfrom_base;

		vto = kmap_atomic(*pgto);
		if (*pgto != *pgfrom) {
			vfrom = kmap_atomic(*pgfrom);
			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
			kunmap_atomic(vfrom);
		} else
			memmove(vto + pgto_base, vto + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vto);

		pgto_base += copy;
		pgfrom_base += copy;

	} while ((len -= copy) != 0);
}

static void
_shift_data_left_tail(struct xdr_buf *buf, unsigned int pgto, size_t len)
{
	struct kvec *tail = buf->tail;

	if (len > tail->iov_len)
		len = tail->iov_len;

	_copy_to_pages(buf->pages,
		       buf->page_base + pgto,
		       (char *)tail->iov_base,
		       len);
	tail->iov_len -= len;

	if (tail->iov_len > 0)
		memmove((char *)tail->iov_base,
			tail->iov_base + len,
			tail->iov_len);
}
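/*
 * Worked example of the page vector addressing used by these helpers
 * (illustrative): with PAGE_SIZE = 4096, a byte at offset 100 into
 * pages[2] has page vector address (2 << PAGE_SHIFT) + 100 = 8292. The
 * page index is recovered as 8292 >> PAGE_SHIFT = 2, and the offset
 * within the page as 8292 & ~PAGE_MASK = 100.
 */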
/**
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	      they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
			size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);

	pgto_base &= ~PAGE_MASK;
	pgfrom_base &= ~PAGE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto);
		if (*pgto != *pgfrom) {
			vfrom = kmap_atomic(*pgfrom);
			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
			kunmap_atomic(vfrom);
		} else
			memmove(vto + pgto_base, vto + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vto);

	} while ((len -= copy) != 0);
}

static unsigned int
_shift_data_right_tail(struct xdr_buf *buf, unsigned int pgfrom, size_t len)
{
	struct kvec *tail = buf->tail;
	unsigned int tailbuf_len;
	unsigned int result = 0;
	size_t copy;

	tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;

	/* Shift the tail first */
	if (tailbuf_len != 0) {
		unsigned int free_space = tailbuf_len - tail->iov_len;

		if (len < free_space)
			free_space = len;
		if (len > free_space)
			len = free_space;

		tail->iov_len += free_space;
		copy = len;

		if (tail->iov_len > len) {
			char *p = (char *)tail->iov_base + len;
			memmove(p, tail->iov_base, tail->iov_len - free_space);
			result += tail->iov_len - free_space;
		} else
			copy = tail->iov_len;

		/* Copy from the inlined pages into the tail */
		_copy_from_pages((char *)tail->iov_base,
				 buf->pages,
				 buf->page_base + pgfrom,
				 copy);
		result += copy;
	}

	return result;
}

/**
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	for (;;) {
		copy = PAGE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	flush_dcache_page(*pgto);
}
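/*
 * Illustrative sketch (not part of the original file): placing a flat
 * kernel buffer at the start of a buffer's page vector; "prime_pages"
 * is hypothetical:
 *
 *	static void prime_pages(struct xdr_buf *buf, const char *src,
 *				size_t len)
 *	{
 *		_copy_to_pages(buf->pages, buf->page_base, src, len);
 *	}
 */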
/**
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	do {
		copy = PAGE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom);

		pgbase += copy;
		if (pgbase == PAGE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);

/**
 * _zero_pages
 * @pages: array of pages
 * @pgbase: beginning page vector address
 * @len: length
 */
static void
_zero_pages(struct page **pages, size_t pgbase, size_t len)
{
	struct page **page;
	char *vpage;
	size_t zero;

	page = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	do {
		zero = PAGE_SIZE - pgbase;
		if (zero > len)
			zero = len;

		vpage = kmap_atomic(*page);
		memset(vpage + pgbase, 0, zero);
		kunmap_atomic(vpage);

		flush_dcache_page(*page);
		pgbase = 0;
		page++;

	} while ((len -= zero) != 0);
}
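/*
 * Illustrative sketch (not part of the original file): linearizing a
 * 16-byte verifier that may straddle a page boundary in buf->pages;
 * "read_verifier" is hypothetical:
 *
 *	static void read_verifier(struct xdr_buf *buf, u8 verf[16])
 *	{
 *		_copy_from_pages((char *)verf, buf->pages,
 *				 buf->page_base, 16);
 *	}
 */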
/**
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static unsigned int
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;
	unsigned int result;

	result = 0;
	tail = buf->tail;
	head = buf->head;

	WARN_ON_ONCE(len > head->iov_len);
	if (len > head->iov_len)
		len = head->iov_len;

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
				tail->iov_base, copy);
			result += copy;
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0) {
			_copy_from_pages((char *)tail->iov_base + offs,
					 buf->pages,
					 buf->page_base + pglen + offs - len,
					 copy);
			result += copy;
		}
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
			       (char *)head->iov_base +
			       head->iov_len - offs,
			       copy);
			result += copy;
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
						buf->page_base + len,
						buf->page_base,
						pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
			       (char *)head->iov_base + head->iov_len - len,
			       copy);
		result += copy;
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;

	return result;
}

/**
 * xdr_shrink_pagelen - shrinks buf->pages by up to @len bytes
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * The extra data is not lost, but is instead moved into buf->tail.
 * Returns the actual number of bytes moved.
 */
static unsigned int
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	unsigned int pglen = buf->page_len;
	unsigned int result;

	if (len > buf->page_len)
		len = buf->page_len;

	result = _shift_data_right_tail(buf, pglen - len, len);
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;

	return result;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);

/**
 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
{
	return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
}
EXPORT_SYMBOL_GPL(xdr_stream_pos);

/**
 * xdr_page_pos - Return the current offset from the start of the xdr pages
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_page_pos(const struct xdr_stream *xdr)
{
	unsigned int pos = xdr_stream_pos(xdr);

	WARN_ON(pos < xdr->buf->head[0].iov_len);
	return pos - xdr->buf->head[0].iov_len;
}
EXPORT_SYMBOL_GPL(xdr_page_pos);

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 * @rqst: pointer to controlling rpc_rqst, for debugging
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
		     struct rpc_rqst *rqst)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	xdr_set_scratch_buffer(xdr, NULL, 0);
	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
	xdr->rqst = rqst;
}
EXPORT_SYMBOL_GPL(xdr_init_encode);
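/*
 * Illustrative sketch (not part of the original file): the usual encode
 * sequence pairs xdr_init_encode() with xdr_reserve_space(). The "xid"
 * argument and "encode_xid" name are hypothetical:
 *
 *	static int encode_xid(struct rpc_rqst *req, struct xdr_buf *buf,
 *			      u32 xid)
 *	{
 *		struct xdr_stream xdr;
 *		__be32 *p;
 *
 *		xdr_init_encode(&xdr, buf, buf->head[0].iov_base, req);
 *		p = xdr_reserve_space(&xdr, sizeof(xid));
 *		if (!p)
 *			return -EMSGSIZE;
 *		*p = cpu_to_be32(xid);
 *		return 0;
 *	}
 */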
/**
 * xdr_commit_encode - Ensure all data is written to buffer
 * @xdr: pointer to xdr_stream
 *
 * We handle encoding across page boundaries by giving the caller a
 * temporary location to write to, then later copying the data into
 * place; xdr_commit_encode does that copying.
 *
 * Normally the caller doesn't need to call this directly, as the
 * following xdr_reserve_space will do it.  But an explicit call may be
 * required at the end of encoding, or any other time when the xdr_buf
 * data might be read.
 */
inline void xdr_commit_encode(struct xdr_stream *xdr)
{
	int shift = xdr->scratch.iov_len;
	void *page;

	if (shift == 0)
		return;
	page = page_address(*xdr->page_ptr);
	memcpy(xdr->scratch.iov_base, page, shift);
	memmove(page, page + shift, (void *)xdr->p - page);
	xdr->scratch.iov_len = 0;
}
EXPORT_SYMBOL_GPL(xdr_commit_encode);

static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
					  size_t nbytes)
{
	__be32 *p;
	int space_left;
	int frag1bytes, frag2bytes;

	if (nbytes > PAGE_SIZE)
		goto out_overflow; /* Bigger buffers require special handling */
	if (xdr->buf->len + nbytes > xdr->buf->buflen)
		goto out_overflow; /* Sorry, we're totally out of space */
	frag1bytes = (xdr->end - xdr->p) << 2;
	frag2bytes = nbytes - frag1bytes;
	if (xdr->iov)
		xdr->iov->iov_len += frag1bytes;
	else
		xdr->buf->page_len += frag1bytes;
	xdr->page_ptr++;
	xdr->iov = NULL;
	/*
	 * If the last encode didn't end exactly on a page boundary, the
	 * next one will straddle boundaries.  Encode into the next
	 * page, then copy it back later in xdr_commit_encode.  We use
	 * the "scratch" iov to track any temporarily unused fragment of
	 * space at the end of the previous buffer:
	 */
	xdr->scratch.iov_base = xdr->p;
	xdr->scratch.iov_len = frag1bytes;
	p = page_address(*xdr->page_ptr);
	/*
	 * Note this is where the next encode will start after we've
	 * shifted this one back:
	 */
	xdr->p = (void *)p + frag2bytes;
	space_left = xdr->buf->buflen - xdr->buf->len;
	xdr->end = (void *)p + min_t(int, space_left, PAGE_SIZE);
	xdr->buf->page_len += frag2bytes;
	xdr->buf->len += nbytes;
	return p;
out_overflow:
	trace_rpc_xdr_overflow(xdr, nbytes);
	return NULL;
}

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	xdr_commit_encode(xdr);
	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return xdr_get_next_encode_buffer(xdr, nbytes);
	xdr->p = q;
	if (xdr->iov)
		xdr->iov->iov_len += nbytes;
	else
		xdr->buf->page_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
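/*
 * Illustrative note (not part of the original file): xdr_reserve_space()
 * rounds each request up to a 4-byte multiple, so reserving 4 + 5 bytes
 * for a counted 5-byte opaque actually consumes 4 + 8 bytes of stream
 * space. "encode_owner" and its 5-byte "owner" are hypothetical:
 *
 *	static int encode_owner(struct xdr_stream *xdr, const u8 *owner)
 *	{
 *		__be32 *p = xdr_reserve_space(xdr, 4 + 5);
 *
 *		if (!p)
 *			return -EMSGSIZE;
 *		xdr_encode_opaque(p, owner, 5);
 *		return 0;
 *	}
 */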
/**
 * xdr_reserve_space_vec - Reserves a large amount of buffer space for sending
 * @xdr: pointer to xdr_stream
 * @vec: pointer to a kvec array
 * @nbytes: number of bytes to reserve
 *
 * Reserves enough buffer space to encode 'nbytes' of data and stores the
 * pointers in 'vec'. The size argument passed to xdr_reserve_space() is
 * determined based on the number of bytes remaining in the current page to
 * avoid invalidating iov_base pointers when xdr_commit_encode() is called.
 */
int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec, size_t nbytes)
{
	int thislen;
	int v = 0;
	__be32 *p;

	/*
	 * svcrdma requires every READ payload to start somewhere
	 * in xdr->pages.
	 */
	if (xdr->iov == xdr->buf->head) {
		xdr->iov = NULL;
		xdr->end = xdr->p;
	}

	while (nbytes) {
		thislen = xdr->buf->page_len % PAGE_SIZE;
		thislen = min_t(size_t, nbytes, PAGE_SIZE - thislen);

		p = xdr_reserve_space(xdr, thislen);
		if (!p)
			return -EIO;

		vec[v].iov_base = p;
		vec[v].iov_len = thislen;
		v++;
		nbytes -= thislen;
	}

	return v;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space_vec);

/**
 * xdr_truncate_encode - truncate an encode buffer
 * @xdr: pointer to xdr_stream
 * @len: new length of buffer
 *
 * Truncates the xdr stream, so that xdr->buf->len == len,
 * and xdr->p points at offset len from the start of the buffer, and
 * head, tail, and page lengths are adjusted to correspond.
 *
 * If this means moving xdr->p to a different buffer, we assume that
 * the end pointer should be set to the end of the current page,
 * except in the case of the head buffer when we assume the head
 * buffer's current length represents the end of the available buffer.
 *
 * This is *not* safe to use on a buffer that already has inlined page
 * cache pages (as in a zero-copy server read reply), except for the
 * simple case of truncating from one position in the tail to another.
 *
 */
void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *head = buf->head;
	struct kvec *tail = buf->tail;
	int fraglen;
	int new;

	if (len > buf->len) {
		WARN_ON_ONCE(1);
		return;
	}
	xdr_commit_encode(xdr);

	fraglen = min_t(int, buf->len - len, tail->iov_len);
	tail->iov_len -= fraglen;
	buf->len -= fraglen;
	if (tail->iov_len) {
		xdr->p = tail->iov_base + tail->iov_len;
		WARN_ON_ONCE(!xdr->end);
		WARN_ON_ONCE(!xdr->iov);
		return;
	}
	WARN_ON_ONCE(fraglen);
	fraglen = min_t(int, buf->len - len, buf->page_len);
	buf->page_len -= fraglen;
	buf->len -= fraglen;

	new = buf->page_base + buf->page_len;

	xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);

	if (buf->page_len) {
		xdr->p = page_address(*xdr->page_ptr);
		xdr->end = (void *)xdr->p + PAGE_SIZE;
		xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
		WARN_ON_ONCE(xdr->iov);
		return;
	}
	if (fraglen)
		xdr->end = head->iov_base + head->iov_len;
	/* (otherwise assume xdr->end is already set) */
	xdr->page_ptr--;
	head->iov_len = len;
	buf->len = len;
	xdr->p = head->iov_base + head->iov_len;
	xdr->iov = buf->head;
}
EXPORT_SYMBOL(xdr_truncate_encode);
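/*
 * Illustrative sketch (not part of the original file): a common
 * xdr_truncate_encode() pattern is to note the stream length before an
 * optional item and roll back if encoding it fails; "encode_item" is
 * hypothetical:
 *
 *	static void encode_optional(struct xdr_stream *xdr)
 *	{
 *		unsigned int start = xdr->buf->len;
 *
 *		if (encode_item(xdr) < 0)
 *			xdr_truncate_encode(xdr, start);
 *	}
 */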
/**
 * xdr_restrict_buflen - decrease available buffer space
 * @xdr: pointer to xdr_stream
 * @newbuflen: new maximum number of bytes available
 *
 * Adjust our idea of how much space is available in the buffer.
 * If we've already used too much space in the buffer, returns -1.
 * If the available space is already smaller than newbuflen, returns 0
 * and does nothing.  Otherwise, adjusts xdr->buf->buflen to newbuflen
 * and ensures xdr->end is set at most offset newbuflen from the start
 * of the buffer.
 */
int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen)
{
	struct xdr_buf *buf = xdr->buf;
	int left_in_this_buf = (void *)xdr->end - (void *)xdr->p;
	int end_offset = buf->len + left_in_this_buf;

	if (newbuflen < 0 || newbuflen < buf->len)
		return -1;
	if (newbuflen > buf->buflen)
		return 0;
	if (newbuflen < end_offset)
		xdr->end = (void *)xdr->end + newbuflen - end_offset;
	buf->buflen = newbuflen;
	return 0;
}
EXPORT_SYMBOL(xdr_restrict_buflen);

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		     unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;

	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);
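/*
 * Illustrative note (not part of the original file): because XDR items
 * are 32-bit aligned, xdr_write_pages() pads an unaligned page payload
 * via the tail. For len = 5, pad = 4 - (5 & 3) = 3, so the tail begins
 * with three zero bytes and buf->len grows by 8, not 5.
 */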
static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
			unsigned int len)
{
	if (len > iov->iov_len)
		len = iov->iov_len;
	xdr->p = (__be32*)iov->iov_base;
	xdr->end = (__be32*)(iov->iov_base + len);
	xdr->iov = iov;
	xdr->page_ptr = NULL;
}

static int xdr_set_page_base(struct xdr_stream *xdr,
			     unsigned int base, unsigned int len)
{
	unsigned int pgnr;
	unsigned int maxlen;
	unsigned int pgoff;
	unsigned int pgend;
	void *kaddr;

	maxlen = xdr->buf->page_len;
	if (base >= maxlen)
		return -EINVAL;
	maxlen -= base;
	if (len > maxlen)
		len = maxlen;

	base += xdr->buf->page_base;

	pgnr = base >> PAGE_SHIFT;
	xdr->page_ptr = &xdr->buf->pages[pgnr];
	kaddr = page_address(*xdr->page_ptr);

	pgoff = base & ~PAGE_MASK;
	xdr->p = (__be32*)(kaddr + pgoff);

	pgend = pgoff + len;
	if (pgend > PAGE_SIZE)
		pgend = PAGE_SIZE;
	xdr->end = (__be32*)(kaddr + pgend);
	xdr->iov = NULL;
	return 0;
}

static void xdr_set_page(struct xdr_stream *xdr, unsigned int base,
			 unsigned int len)
{
	if (xdr_set_page_base(xdr, base, len) < 0)
		xdr_set_iov(xdr, xdr->buf->tail, xdr->nwords << 2);
}

static void xdr_set_next_page(struct xdr_stream *xdr)
{
	unsigned int newbase;

	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
	newbase -= xdr->buf->page_base;

	xdr_set_page(xdr, newbase, PAGE_SIZE);
}

static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
	if (xdr->page_ptr != NULL)
		xdr_set_next_page(xdr);
	else if (xdr->iov == xdr->buf->head)
		xdr_set_page(xdr, 0, PAGE_SIZE);
	return xdr->p != xdr->end;
}

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 * @rqst: pointer to controlling rpc_rqst, for debugging
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
		     struct rpc_rqst *rqst)
{
	xdr->buf = buf;
	xdr->scratch.iov_base = NULL;
	xdr->scratch.iov_len = 0;
	xdr->nwords = XDR_QUADLEN(buf->len);
	if (buf->head[0].iov_len != 0)
		xdr_set_iov(xdr, buf->head, buf->len);
	else if (buf->page_len != 0)
		xdr_set_page_base(xdr, 0, buf->len);
	else
		xdr_set_iov(xdr, buf->head, buf->len);
	if (p != NULL && p > xdr->p && xdr->end >= p) {
		xdr->nwords -= p - xdr->p;
		xdr->p = p;
	}
	xdr->rqst = rqst;
}
EXPORT_SYMBOL_GPL(xdr_init_decode);

/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @pages: list of pages to decode into
 * @len: length in bytes of buffer in pages
 */
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
			   struct page **pages, unsigned int len)
{
	memset(buf, 0, sizeof(*buf));
	buf->pages = pages;
	buf->page_len = len;
	buf->buflen = len;
	buf->len = len;
	xdr_init_decode(xdr, buf, NULL, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);

static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	unsigned int nwords = XDR_QUADLEN(nbytes);
	__be32 *p = xdr->p;
	__be32 *q = p + nwords;

	if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->nwords -= nwords;
	return p;
}

/**
 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to an empty buffer
 * @buflen: size of 'buf'
 *
 * The scratch buffer is used when decoding from an array of pages.
 * If an xdr_inline_decode() call spans across page boundaries, then
 * we copy the data into the scratch buffer in order to allow linear
 * access.
 */
void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
{
	xdr->scratch.iov_base = buf;
	xdr->scratch.iov_len = buflen;
}
EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
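/*
 * Illustrative sketch (not part of the original file): decoding a
 * 12-byte object that may cross a page boundary; the on-stack scratch
 * buffer lets xdr_inline_decode() return a linear pointer either way.
 * "decode_stateid" is hypothetical:
 *
 *	static int decode_stateid(struct xdr_stream *xdr, u8 stateid[12])
 *	{
 *		char scratch[12];
 *		__be32 *p;
 *
 *		xdr_set_scratch_buffer(xdr, scratch, sizeof(scratch));
 *		p = xdr_inline_decode(xdr, 12);
 *		if (!p)
 *			return -EBADMSG;
 *		memcpy(stateid, p, 12);
 *		return 0;
 *	}
 */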
static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;
	char *cpdest = xdr->scratch.iov_base;
	size_t cplen = (char *)xdr->end - (char *)xdr->p;

	if (nbytes > xdr->scratch.iov_len)
		goto out_overflow;
	p = __xdr_inline_decode(xdr, cplen);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, cplen);
	if (!xdr_set_next_buffer(xdr))
		goto out_overflow;
	cpdest += cplen;
	nbytes -= cplen;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, nbytes);
	return xdr->scratch.iov_base;
out_overflow:
	trace_rpc_xdr_overflow(xdr, nbytes);
	return NULL;
}

/**
 * xdr_inline_decode - Retrieve XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;

	if (unlikely(nbytes == 0))
		return xdr->p;
	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
		goto out_overflow;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p != NULL)
		return p;
	return xdr_copy_to_scratch(xdr, nbytes);
out_overflow:
	trace_rpc_xdr_overflow(xdr, nbytes);
	return NULL;
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);

static void xdr_realign_pages(struct xdr_stream *xdr)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->head;
	unsigned int cur = xdr_stream_pos(xdr);
	unsigned int copied, offset;

	/* Realign pages to current pointer position */
	if (iov->iov_len > cur) {
		offset = iov->iov_len - cur;
		copied = xdr_shrink_bufhead(buf, offset);
		trace_rpc_xdr_alignment(xdr, offset, copied);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}
}

static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	unsigned int nwords = XDR_QUADLEN(len);
	unsigned int cur = xdr_stream_pos(xdr);
	unsigned int copied, offset;

	if (xdr->nwords == 0)
		return 0;

	xdr_realign_pages(xdr);
	if (nwords > xdr->nwords) {
		nwords = xdr->nwords;
		len = nwords << 2;
	}
	if (buf->page_len <= len)
		len = buf->page_len;
	else if (nwords < xdr->nwords) {
		/* Truncate page data and move it into the tail */
		offset = buf->page_len - len;
		copied = xdr_shrink_pagelen(buf, offset);
		trace_rpc_xdr_alignment(xdr, offset, copied);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}
	return len;
}

/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 *
 * Returns the number of XDR encoded bytes now contained in the pages
 */
unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	unsigned int nwords;
	unsigned int end;
	unsigned int padding;

	len = xdr_align_pages(xdr, len);
	if (len == 0)
		return 0;
	nwords = XDR_QUADLEN(len);
	padding = (nwords << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length. */
	end = ((xdr->nwords - nwords) << 2) + padding;
	if (end > iov->iov_len)
		end = iov->iov_len;

	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
	xdr->page_ptr = NULL;
	xdr->nwords = XDR_QUADLEN(end - padding);
	return len;
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
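/*
 * Illustrative sketch (not part of the original file): a READ-style
 * reply decoder would typically decode the byte count from the head,
 * then call xdr_read_pages() so the payload starts in buf->pages.
 * "decode_read_data" is hypothetical:
 *
 *	static int decode_read_data(struct xdr_stream *xdr, u32 *count)
 *	{
 *		__be32 *p = xdr_inline_decode(xdr, 4);
 *
 *		if (!p)
 *			return -EBADMSG;
 *		*count = be32_to_cpup(p);
 *		if (xdr_read_pages(xdr, *count) < *count)
 *			return -EBADMSG;
 *		return 0;
 *	}
 */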
uint64_t xdr_align_data(struct xdr_stream *xdr, uint64_t offset, uint32_t length)
{
	struct xdr_buf *buf = xdr->buf;
	unsigned int from, bytes;
	unsigned int shift = 0;

	if ((offset + length) < offset ||
	    (offset + length) > buf->page_len)
		length = buf->page_len - offset;

	xdr_realign_pages(xdr);
	from = xdr_page_pos(xdr);
	bytes = xdr->nwords << 2;
	if (length < bytes)
		bytes = length;

	/* Move page data to the left */
	if (from > offset) {
		shift = min_t(unsigned int, bytes, buf->page_len - from);
		_shift_data_left_pages(buf->pages,
				       buf->page_base + offset,
				       buf->page_base + from,
				       shift);
		bytes -= shift;

		/* Move tail data into the pages, if necessary */
		if (bytes > 0)
			_shift_data_left_tail(buf, offset + shift, bytes);
	}

	xdr->nwords -= XDR_QUADLEN(length);
	xdr_set_page(xdr, from + length, PAGE_SIZE);
	return length;
}
EXPORT_SYMBOL_GPL(xdr_align_data);

uint64_t xdr_expand_hole(struct xdr_stream *xdr, uint64_t offset, uint64_t length)
{
	struct xdr_buf *buf = xdr->buf;
	unsigned int bytes;
	unsigned int from;
	unsigned int truncated = 0;

	if ((offset + length) < offset ||
	    (offset + length) > buf->page_len)
		length = buf->page_len - offset;

	xdr_realign_pages(xdr);
	from = xdr_page_pos(xdr);
	bytes = xdr->nwords << 2;

	if (offset + length + bytes > buf->page_len) {
		unsigned int shift = (offset + length + bytes) - buf->page_len;
		unsigned int res = _shift_data_right_tail(buf, from + bytes - shift, shift);
		truncated = shift - res;
		xdr->nwords -= XDR_QUADLEN(truncated);
		bytes -= shift;
	}

	/* Now move the page data over and zero pages */
	if (bytes > 0)
		_shift_data_right_pages(buf->pages,
					buf->page_base + offset + length,
					buf->page_base + from,
					bytes);
	_zero_pages(buf->pages, buf->page_base + offset, length);

	buf->len += length - (from - offset) - truncated;
	xdr_set_page(xdr, offset + length, PAGE_SIZE);
	return length;
}
EXPORT_SYMBOL_GPL(xdr_expand_hole);
/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	len = xdr_align_pages(xdr, len);
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	if (len != 0)
		xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);

static const struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);

/**
 * xdr_buf_subsegment - set subbuf to a portion of buf
 * @buf: an xdr buffer
 * @subbuf: the result buffer
 * @base: beginning of range in bytes
 * @len: length of range in bytes
 *
 * sets @subbuf to an xdr buffer representing the portion of @buf of
 * length @len starting at offset @base.
 *
 * @buf and @subbuf may be pointers to the same struct xdr_buf.
 *
 * Returns -1 if base or length are out of bounds.
 */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
		   unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		base -= buf->head[0].iov_len;
		subbuf->head[0].iov_base = buf->head[0].iov_base;
		subbuf->head[0].iov_len = 0;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->pages = buf->pages;
		subbuf->page_base = 0;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		base -= buf->tail[0].iov_len;
		subbuf->tail[0].iov_base = buf->tail[0].iov_base;
		subbuf->tail[0].iov_len = 0;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
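/*
 * Illustrative sketch (not part of the original file): taking a no-copy
 * view of bytes [base, base + len) of a buffer; the subsegment aliases
 * @buf, so no page data moves. "view_range" is hypothetical:
 *
 *	static int view_range(struct xdr_buf *buf, unsigned int base,
 *			      unsigned int len, struct xdr_buf *sub)
 *	{
 *		return xdr_buf_subsegment(buf, sub, base, len) ? -EINVAL : 0;
 *	}
 */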
/**
 * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
 * @buf: buf to be trimmed
 * @len: number of bytes to reduce "buf" by
 *
 * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
 * that it's possible that we'll trim less than that amount if the xdr_buf is
 * too small, or if (for instance) it's all in the head and the parser has
 * already read too far into it.
 */
void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
{
	size_t cur;
	unsigned int trim = len;

	if (buf->tail[0].iov_len) {
		cur = min_t(size_t, buf->tail[0].iov_len, trim);
		buf->tail[0].iov_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->page_len) {
		cur = min_t(unsigned int, buf->page_len, trim);
		buf->page_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->head[0].iov_len) {
		cur = min_t(size_t, buf->head[0].iov_len, trim);
		buf->head[0].iov_len -= cur;
		trim -= cur;
	}
fix_len:
	buf->len -= (len - trim);
}
EXPORT_SYMBOL_GPL(xdr_buf_trim);

static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);

static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);

int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32 raw;
	int status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = be32_to_cpu(raw);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32 raw = cpu_to_be32(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);
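/*
 * Illustrative sketch (not part of the original file): xdr_encode_word()
 * makes it easy to patch a length word that was encoded earlier, once
 * the final value is known; "offset" is a hypothetical byte offset into
 * @buf:
 *
 *	static int fixup_count(struct xdr_buf *buf, unsigned int offset,
 *			       u32 count)
 *	{
 *		return xdr_encode_word(buf, offset, count);
 *	}
 */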
/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_SHIFT);
		base &= ~PAGE_MASK;
		avail_page = min_t(unsigned int, PAGE_SIZE - base,
				   avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
						     desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
						     desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
					 (unsigned int) PAGE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);

int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_SHIFT;
		thislen = PAGE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
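/*
 * Illustrative sketch (not part of the original file): an actor for
 * xdr_process_buf() that just totals the bytes it is offered; real
 * callers typically feed each scatterlist entry to a hash or cipher.
 * "count_actor" is hypothetical:
 *
 *	static int count_actor(struct scatterlist *sg, void *data)
 *	{
 *		*(unsigned int *)data += sg->length;
 *		return 0;
 *	}
 *
 * invoked as: xdr_process_buf(buf, 0, buf->len, count_actor, &total);
 */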
/**
 * xdr_stream_decode_opaque - Decode variable length opaque
 * @xdr: pointer to xdr_stream
 * @ptr: location to store opaque data
 * @size: size of storage buffer @ptr
 *
 * Return values:
 *   On success, returns size of object stored in *@ptr
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE on overflow of storage buffer @ptr
 */
ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, size_t size)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
	if (ret <= 0)
		return ret;
	memcpy(ptr, p, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque);

/**
 * xdr_stream_decode_opaque_dup - Decode and duplicate variable length opaque
 * @xdr: pointer to xdr_stream
 * @ptr: location to store pointer to opaque data
 * @maxlen: maximum acceptable object size
 * @gfp_flags: GFP mask to use
 *
 * Return values:
 *   On success, returns size of object stored in *@ptr
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE if the size of the object would exceed @maxlen
 *   %-ENOMEM on memory allocation failure
 */
ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr,
				     size_t maxlen, gfp_t gfp_flags)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
	if (ret > 0) {
		*ptr = kmemdup(p, ret, gfp_flags);
		if (*ptr != NULL)
			return ret;
		ret = -ENOMEM;
	}
	*ptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_dup);

/**
 * xdr_stream_decode_string - Decode variable length string
 * @xdr: pointer to xdr_stream
 * @str: location to store string
 * @size: size of storage buffer @str
 *
 * Return values:
 *   On success, returns length of NUL-terminated string stored in *@str
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE on overflow of storage buffer @str
 */
ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, size_t size)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
	if (ret > 0) {
		memcpy(str, p, ret);
		str[ret] = '\0';
		return strlen(str);
	}
	*str = '\0';
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_string);

/**
 * xdr_stream_decode_string_dup - Decode and duplicate variable length string
 * @xdr: pointer to xdr_stream
 * @str: location to store pointer to string
 * @maxlen: maximum acceptable string length
 * @gfp_flags: GFP mask to use
 *
 * Return values:
 *   On success, returns length of NUL-terminated string stored in *@str
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE if the size of the string would exceed @maxlen
 *   %-ENOMEM on memory allocation failure
 */
ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
				     size_t maxlen, gfp_t gfp_flags)
{
	void *p;
	ssize_t ret;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
	if (ret > 0) {
		char *s = kmalloc(ret + 1, gfp_flags);

		if (s != NULL) {
			memcpy(s, p, ret);
			s[ret] = '\0';
			*str = s;
			return strlen(s);
		}
		ret = -ENOMEM;
	}
	*str = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup);
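/*
 * Illustrative sketch (not part of the original file): decoding a string
 * into a freshly allocated buffer; on success the caller owns *name and
 * must kfree() it. The 1024-byte cap and "decode_name" are hypothetical:
 *
 *	static ssize_t decode_name(struct xdr_stream *xdr, char **name)
 *	{
 *		return xdr_stream_decode_string_dup(xdr, name, 1024,
 *						    GFP_KERNEL);
 *	}
 */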