/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/bvec.h>

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = cpu_to_be32(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int len;

	if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 *
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
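/*
 * Illustrative sketch (not part of this file): encoding a fixed-length
 * verifier followed by a variable-length opaque. The buffer "p" is
 * assumed to have been obtained from xdr_reserve_space() with enough
 * room for both objects:
 *
 *	u8 verf[8] = { 0 };
 *	const char owner[] = "lockd";
 *
 *	p = xdr_encode_opaque_fixed(p, verf, sizeof(verf));
 *	p = xdr_encode_opaque(p, owner, sizeof(owner) - 1);
 *
 * Both helpers zero-pad to the next 32-bit boundary, so "owner" (5
 * bytes) consumes a 4-byte length word plus 8 bytes of payload and
 * padding.
 */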
/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = cpu_to_be32(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);

__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);

__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len;

	len = be32_to_cpu(*p++);
	if (len > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);

/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 *
 */
void
xdr_terminate_string(struct xdr_buf *buf, const u32 len)
{
	char *kaddr;

	kaddr = kmap_atomic(buf->pages[0]);
	kaddr[buf->page_base + len] = '\0';
	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);

size_t
xdr_buf_pagecount(struct xdr_buf *buf)
{
	if (!buf->page_len)
		return 0;
	return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int
xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp)
{
	size_t i, n = xdr_buf_pagecount(buf);

	if (n != 0 && buf->bvec == NULL) {
		buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
		if (!buf->bvec)
			return -ENOMEM;
		for (i = 0; i < n; i++) {
			buf->bvec[i].bv_page = buf->pages[i];
			buf->bvec[i].bv_len = PAGE_SIZE;
			buf->bvec[i].bv_offset = 0;
		}
	}
	return 0;
}

void
xdr_free_bvec(struct xdr_buf *buf)
{
	kfree(buf->bvec);
	buf->bvec = NULL;
}

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);
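/*
 * Illustrative sketch (hypothetical caller): splicing page-cache pages
 * into a receive buffer at a known offset. Everything past "hdrlen" in
 * the head kvec becomes the tail, and "pagelist" supplies the middle of
 * the message:
 *
 *	xdr_inline_pages(&req->rq_rcv_buf, hdrlen, pagelist, 0, count);
 *
 * Afterwards head[0] covers bytes [0, hdrlen), the page array covers
 * the next "count" bytes, and tail[0] covers whatever used to follow
 * the header in the head buffer.
 */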
/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 */

/**
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);

	pgto_base &= ~PAGE_MASK;
	pgfrom_base &= ~PAGE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto);
		if (*pgto != *pgfrom) {
			vfrom = kmap_atomic(*pgfrom);
			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
			kunmap_atomic(vfrom);
		} else
			memmove(vto + pgto_base, vto + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vto);

	} while ((len -= copy) != 0);
}

/**
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	for (;;) {
		copy = PAGE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	flush_dcache_page(*pgto);
}

/**
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	do {
		copy = PAGE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom);

		pgbase += copy;
		if (pgbase == PAGE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);
/**
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;

	WARN_ON_ONCE(len > head->iov_len);
	if (len > head->iov_len)
		len = head->iov_len;

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

/**
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	unsigned int pglen = buf->page_len;
	unsigned int tailbuf_len;

	tail = buf->tail;
	BUG_ON(len > pglen);

	tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;

	/* Shift the tail first */
	if (tailbuf_len != 0) {
		unsigned int free_space = tailbuf_len - tail->iov_len;

		if (len < free_space)
			free_space = len;
		tail->iov_len += free_space;

		copy = len;
		if (tail->iov_len > len) {
			char *p = (char *)tail->iov_base + len;
			memmove(p, tail->iov_base, tail->iov_len - len);
		} else
			copy = tail->iov_len;
		/* Copy from the inlined pages into the tail */
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);

/**
 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
{
	return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
}
EXPORT_SYMBOL_GPL(xdr_stream_pos);
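/*
 * Illustrative sketch: xdr_stream_pos() derives the byte offset from the
 * count of 32-bit words not yet decoded. For a buffer with buf->len of
 * 32 bytes and 5 quad-words still unread, the stream is positioned at
 * (8 - 5) << 2 == 12 bytes from the start of the message. A caller
 * might record the position before a speculative decode:
 *
 *	unsigned int pos = xdr_stream_pos(xdr);
 *
 *	if (!decode_foo(xdr))			// hypothetical decoder
 *		pr_debug("decode failed at offset %u\n", pos);
 */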
/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	xdr_set_scratch_buffer(xdr, NULL, 0);
	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_encode);

/**
 * xdr_commit_encode - Ensure all data is written to buffer
 * @xdr: pointer to xdr_stream
 *
 * We handle encoding across page boundaries by giving the caller a
 * temporary location to write to, then later copying the data into
 * place; xdr_commit_encode does that copying.
 *
 * Normally the caller doesn't need to call this directly, as the
 * following xdr_reserve_space will do it. But an explicit call may be
 * required at the end of encoding, or any other time when the xdr_buf
 * data might be read.
 */
void xdr_commit_encode(struct xdr_stream *xdr)
{
	int shift = xdr->scratch.iov_len;
	void *page;

	if (shift == 0)
		return;
	page = page_address(*xdr->page_ptr);
	memcpy(xdr->scratch.iov_base, page, shift);
	memmove(page, page + shift, (void *)xdr->p - page);
	xdr->scratch.iov_len = 0;
}
EXPORT_SYMBOL_GPL(xdr_commit_encode);
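/*
 * Illustrative sketch (hypothetical encoder): commit before anything
 * reads the xdr_buf, for example before computing a checksum over it
 * or handing it to the transport:
 *
 *	__be32 *p = xdr_reserve_space(xdr, 4 + XDR_QUADLEN(len) * 4);
 *	if (!p)
 *		return -ENOBUFS;	// assumed error convention
 *	xdr_encode_opaque(p, data, len);
 *	xdr_commit_encode(xdr);		// copy any straddled fragment home
 *
 * Intermediate xdr_reserve_space() calls commit implicitly; only the
 * final encode, or a direct read of the buffer, needs the explicit
 * call.
 */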
static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
		size_t nbytes)
{
	__be32 *p;
	int space_left;
	int frag1bytes, frag2bytes;

	if (nbytes > PAGE_SIZE)
		return NULL; /* Bigger buffers require special handling */
	if (xdr->buf->len + nbytes > xdr->buf->buflen)
		return NULL; /* Sorry, we're totally out of space */
	frag1bytes = (xdr->end - xdr->p) << 2;
	frag2bytes = nbytes - frag1bytes;
	if (xdr->iov)
		xdr->iov->iov_len += frag1bytes;
	else
		xdr->buf->page_len += frag1bytes;
	xdr->page_ptr++;
	xdr->iov = NULL;
	/*
	 * If the last encode didn't end exactly on a page boundary, the
	 * next one will straddle boundaries. Encode into the next
	 * page, then copy it back later in xdr_commit_encode. We use
	 * the "scratch" iov to track any temporarily unused fragment of
	 * space at the end of the previous buffer:
	 */
	xdr->scratch.iov_base = xdr->p;
	xdr->scratch.iov_len = frag1bytes;
	p = page_address(*xdr->page_ptr);
	/*
	 * Note this is where the next encode will start after we've
	 * shifted this one back:
	 */
	xdr->p = (void *)p + frag2bytes;
	space_left = xdr->buf->buflen - xdr->buf->len;
	xdr->end = (void *)p + min_t(int, space_left, PAGE_SIZE);
	xdr->buf->page_len += frag2bytes;
	xdr->buf->len += nbytes;
	return p;
}

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	xdr_commit_encode(xdr);
	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return xdr_get_next_encode_buffer(xdr, nbytes);
	xdr->p = q;
	if (xdr->iov)
		xdr->iov->iov_len += nbytes;
	else
		xdr->buf->page_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
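/*
 * Illustrative sketch: the usual encode step reserves rounded-up space,
 * then fills it in. Reservation can fail when the stream is out of
 * buffer space, so the result must always be checked:
 *
 *	__be32 *p;
 *
 *	p = xdr_reserve_space(xdr, 4 + 4);
 *	if (!p)
 *		return -EMSGSIZE;	// assumed error convention
 *	*p++ = cpu_to_be32(attr_count);
 *	*p = cpu_to_be32(attr_flags);
 *
 * "attr_count" and "attr_flags" are hypothetical fields; note that
 * nbytes is rounded up to a multiple of four, matching XDR's 32-bit
 * alignment.
 */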
/**
 * xdr_truncate_encode - truncate an encode buffer
 * @xdr: pointer to xdr_stream
 * @len: new length of buffer
 *
 * Truncates the xdr stream, so that xdr->buf->len == len,
 * and xdr->p points at offset len from the start of the buffer, and
 * head, tail, and page lengths are adjusted to correspond.
 *
 * If this means moving xdr->p to a different buffer, we assume that
 * the end pointer should be set to the end of the current page,
 * except in the case of the head buffer when we assume the head
 * buffer's current length represents the end of the available buffer.
 *
 * This is *not* safe to use on a buffer that already has inlined page
 * cache pages (as in a zero-copy server read reply), except for the
 * simple case of truncating from one position in the tail to another.
 *
 */
void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *head = buf->head;
	struct kvec *tail = buf->tail;
	int fraglen;
	int new;

	if (len > buf->len) {
		WARN_ON_ONCE(1);
		return;
	}
	xdr_commit_encode(xdr);

	fraglen = min_t(int, buf->len - len, tail->iov_len);
	tail->iov_len -= fraglen;
	buf->len -= fraglen;
	if (tail->iov_len) {
		xdr->p = tail->iov_base + tail->iov_len;
		WARN_ON_ONCE(!xdr->end);
		WARN_ON_ONCE(!xdr->iov);
		return;
	}
	WARN_ON_ONCE(fraglen);
	fraglen = min_t(int, buf->len - len, buf->page_len);
	buf->page_len -= fraglen;
	buf->len -= fraglen;

	new = buf->page_base + buf->page_len;

	xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);

	if (buf->page_len) {
		xdr->p = page_address(*xdr->page_ptr);
		xdr->end = (void *)xdr->p + PAGE_SIZE;
		xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
		WARN_ON_ONCE(xdr->iov);
		return;
	}
	if (fraglen) {
		xdr->end = head->iov_base + head->iov_len;
		xdr->page_ptr--;
	}
	/* (otherwise assume xdr->end is already set) */
	head->iov_len = len;
	buf->len = len;
	xdr->p = head->iov_base + head->iov_len;
	xdr->iov = buf->head;
}
EXPORT_SYMBOL(xdr_truncate_encode);
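/*
 * Illustrative sketch (hypothetical directory-reply encoder): record the
 * stream length before encoding an entry, and roll the stream back if
 * the entry fails to encode or does not fit:
 *
 *	unsigned int start = xdr->buf->len;
 *
 *	if (encode_one_entry(xdr, entry) < 0)	// hypothetical helper
 *		xdr_truncate_encode(xdr, start);
 *
 * This is the pattern the comment above has in mind: truncation undoes
 * a partial encode so the reply remains well-formed.
 */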
/**
 * xdr_restrict_buflen - decrease available buffer space
 * @xdr: pointer to xdr_stream
 * @newbuflen: new maximum number of bytes available
 *
 * Adjust our idea of how much space is available in the buffer.
 * If we've already used too much space in the buffer, returns -1.
 * If the available space is already smaller than newbuflen, returns 0
 * and does nothing. Otherwise, adjusts xdr->buf->buflen to newbuflen
 * and ensures xdr->end is set at most offset newbuflen from the start
 * of the buffer.
 */
int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen)
{
	struct xdr_buf *buf = xdr->buf;
	int left_in_this_buf = (void *)xdr->end - (void *)xdr->p;
	int end_offset = buf->len + left_in_this_buf;

	if (newbuflen < 0 || newbuflen < buf->len)
		return -1;
	if (newbuflen > buf->buflen)
		return 0;
	if (newbuflen < end_offset)
		xdr->end = (void *)xdr->end + newbuflen - end_offset;
	buf->buflen = newbuflen;
	return 0;
}
EXPORT_SYMBOL(xdr_restrict_buflen);

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);

static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
		unsigned int len)
{
	if (len > iov->iov_len)
		len = iov->iov_len;
	xdr->p = (__be32*)iov->iov_base;
	xdr->end = (__be32*)(iov->iov_base + len);
	xdr->iov = iov;
	xdr->page_ptr = NULL;
}

static int xdr_set_page_base(struct xdr_stream *xdr,
		unsigned int base, unsigned int len)
{
	unsigned int pgnr;
	unsigned int maxlen;
	unsigned int pgoff;
	unsigned int pgend;
	void *kaddr;

	maxlen = xdr->buf->page_len;
	if (base >= maxlen)
		return -EINVAL;
	maxlen -= base;
	if (len > maxlen)
		len = maxlen;

	base += xdr->buf->page_base;

	pgnr = base >> PAGE_SHIFT;
	xdr->page_ptr = &xdr->buf->pages[pgnr];
	kaddr = page_address(*xdr->page_ptr);

	pgoff = base & ~PAGE_MASK;
	xdr->p = (__be32*)(kaddr + pgoff);

	pgend = pgoff + len;
	if (pgend > PAGE_SIZE)
		pgend = PAGE_SIZE;
	xdr->end = (__be32*)(kaddr + pgend);
	xdr->iov = NULL;
	return 0;
}

static void xdr_set_next_page(struct xdr_stream *xdr)
{
	unsigned int newbase;

	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
	newbase -= xdr->buf->page_base;

	if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
		xdr_set_iov(xdr, xdr->buf->tail, xdr->nwords << 2);
}

static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
	if (xdr->page_ptr != NULL)
		xdr_set_next_page(xdr);
	else if (xdr->iov == xdr->buf->head) {
		if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
			xdr_set_iov(xdr, xdr->buf->tail, xdr->nwords << 2);
	}
	return xdr->p != xdr->end;
}
/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	xdr->buf = buf;
	xdr->scratch.iov_base = NULL;
	xdr->scratch.iov_len = 0;
	xdr->nwords = XDR_QUADLEN(buf->len);
	if (buf->head[0].iov_len != 0)
		xdr_set_iov(xdr, buf->head, buf->len);
	else if (buf->page_len != 0)
		xdr_set_page_base(xdr, 0, buf->len);
	else
		xdr_set_iov(xdr, buf->head, buf->len);
	if (p != NULL && p > xdr->p && xdr->end >= p) {
		xdr->nwords -= p - xdr->p;
		xdr->p = p;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_decode);

/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @pages: list of pages to decode into
 * @len: length in bytes of buffer in pages
 */
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
			   struct page **pages, unsigned int len)
{
	memset(buf, 0, sizeof(*buf));
	buf->pages = pages;
	buf->page_len = len;
	buf->buflen = len;
	buf->len = len;
	xdr_init_decode(xdr, buf, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);

static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	unsigned int nwords = XDR_QUADLEN(nbytes);
	__be32 *p = xdr->p;
	__be32 *q = p + nwords;

	if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->nwords -= nwords;
	return p;
}

/**
 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to an empty buffer
 * @buflen: size of 'buf'
 *
 * The scratch buffer is used when decoding from an array of pages.
 * If an xdr_inline_decode() call spans across page boundaries, then
 * we copy the data into the scratch buffer in order to allow linear
 * access.
 */
void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
{
	xdr->scratch.iov_base = buf;
	xdr->scratch.iov_len = buflen;
}
EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);

static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;
	char *cpdest = xdr->scratch.iov_base;
	size_t cplen = (char *)xdr->end - (char *)xdr->p;

	if (nbytes > xdr->scratch.iov_len)
		return NULL;
	p = __xdr_inline_decode(xdr, cplen);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, cplen);
	cpdest += cplen;
	nbytes -= cplen;
	if (!xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, nbytes);
	return xdr->scratch.iov_base;
}
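/*
 * Illustrative sketch (hypothetical page-based decoder): when decoding
 * from an array of pages, attach a scratch buffer first so that objects
 * straddling a page boundary can still be returned as one linear chunk:
 *
 *	struct xdr_stream xdr;
 *	char scratch[64];	// must cover the largest inline object
 *
 *	xdr_init_decode_pages(&xdr, &buf, pages, len);
 *	xdr_set_scratch_buffer(&xdr, scratch, sizeof(scratch));
 *	p = xdr_inline_decode(&xdr, 16);
 *
 * Without the scratch buffer, a 16-byte read that crosses pages would
 * fail; with it, the two fragments are copied together transparently.
 */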
/**
 * xdr_inline_decode - Retrieve XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;

	if (nbytes == 0)
		return xdr->p;
	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p != NULL)
		return p;
	return xdr_copy_to_scratch(xdr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);

static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	unsigned int nwords = XDR_QUADLEN(len);
	unsigned int cur = xdr_stream_pos(xdr);

	if (xdr->nwords == 0)
		return 0;
	/* Realign pages to current pointer position */
	iov = buf->head;
	if (iov->iov_len > cur) {
		xdr_shrink_bufhead(buf, iov->iov_len - cur);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}

	if (nwords > xdr->nwords) {
		nwords = xdr->nwords;
		len = nwords << 2;
	}
	if (buf->page_len <= len)
		len = buf->page_len;
	else if (nwords < xdr->nwords) {
		/* Truncate page data and move it into the tail */
		xdr_shrink_pagelen(buf, buf->page_len - len);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}
	return len;
}

/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 *
 * Returns the number of XDR encoded bytes now contained in the pages
 */
unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	unsigned int nwords;
	unsigned int end;
	unsigned int padding;

	len = xdr_align_pages(xdr, len);
	if (len == 0)
		return 0;
	nwords = XDR_QUADLEN(len);
	padding = (nwords << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length. */
	end = ((xdr->nwords - nwords) << 2) + padding;
	if (end > iov->iov_len)
		end = iov->iov_len;

	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
	xdr->page_ptr = NULL;
	xdr->nwords = XDR_QUADLEN(end - padding);
	return len;
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
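/*
 * Illustrative sketch (hypothetical READ reply decoder): after decoding
 * the on-the-wire count from the head, align the payload so it lands in
 * the page array and the trailing fields land in the tail:
 *
 *	count = be32_to_cpup(p);
 *	recvd = xdr_read_pages(xdr, count);
 *	if (recvd < count)
 *		count = recvd;		// short read from the server
 *
 * Subsequent xdr_inline_decode() calls then continue in the tail,
 * directly after the (padded) page data.
 */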
/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	len = xdr_align_pages(xdr, len);
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	if (len != 0)
		xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);

/**
 * xdr_buf_subsegment - set subbuf to a portion of buf
 * @buf: an xdr buffer
 * @subbuf: the result buffer
 * @base: beginning of range in bytes
 * @len: length of range in bytes
 *
 * sets @subbuf to an xdr buffer representing the portion of @buf of
 * length @len starting at offset @base.
 *
 * @buf and @subbuf may be pointers to the same struct xdr_buf.
 *
 * Returns -1 if base or length are out of bounds.
 */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		base -= buf->head[0].iov_len;
		subbuf->head[0].iov_len = 0;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		base -= buf->tail[0].iov_len;
		subbuf->tail[0].iov_len = 0;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
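/*
 * Illustrative sketch: carving a 12-byte window out of a larger buffer,
 * e.g. to checksum or decrypt just that range. The subsegment shares
 * memory with the parent buffer; nothing is copied:
 *
 *	struct xdr_buf sub;
 *
 *	if (xdr_buf_subsegment(buf, &sub, offset, 12))
 *		return -EFAULT;		// range fell outside "buf"
 *
 * Depending on where "offset" lands, "sub" may span the head, the page
 * array, the tail, or any combination of the three.
 */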
/**
 * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
 * @buf: buf to be trimmed
 * @len: number of bytes to reduce "buf" by
 *
 * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
 * that it's possible that we'll trim less than that amount if the xdr_buf is
 * too small, or if (for instance) it's all in the head and the parser has
 * already read too far into it.
 */
void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
{
	size_t cur;
	unsigned int trim = len;

	if (buf->tail[0].iov_len) {
		cur = min_t(size_t, buf->tail[0].iov_len, trim);
		buf->tail[0].iov_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->page_len) {
		cur = min_t(unsigned int, buf->page_len, trim);
		buf->page_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->head[0].iov_len) {
		cur = min_t(size_t, buf->head[0].iov_len, trim);
		buf->head[0].iov_len -= cur;
		trim -= cur;
	}
fix_len:
	buf->len -= (len - trim);
}
EXPORT_SYMBOL_GPL(xdr_buf_trim);

static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);

static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);

int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32 raw;
	int status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = be32_to_cpu(raw);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32 raw = cpu_to_be32(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);
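/*
 * Illustrative sketch: these helpers address the xdr_buf by absolute
 * byte offset, independent of where the head, pages, and tail
 * boundaries fall. For example, reading a length word and then
 * patching it in place:
 *
 *	u32 count;
 *
 *	if (xdr_decode_word(buf, offset, &count))
 *		return -EFAULT;
 *	if (xdr_encode_word(buf, offset, count - 1))
 *		return -EFAULT;
 *
 * Both return 0 on success and a nonzero result if the word at
 * "offset" is not wholly contained in the buffer.
 */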
/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * tail.) */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_SHIFT);
		base &= ~PAGE_MASK;
		avail_page = min_t(unsigned int, PAGE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
					 (unsigned int) PAGE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);
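/*
 * Illustrative sketch (hypothetical caller): decoding a counted array of
 * fixed-size elements. The xcode callback is invoked once per element,
 * with "elem" pointing at a linear copy whenever an element straddles
 * buffer segments:
 *
 *	static int decode_entry(struct xdr_array2_desc *desc, void *elem)
 *	{
 *		// interpret one desc->elem_size byte element
 *		return 0;
 *	}
 *
 *	struct xdr_array2_desc desc = {
 *		.elem_size    = 8,
 *		.array_maxlen = 64,
 *		.xcode        = decode_entry,
 *	};
 *
 *	err = xdr_decode_array2(buf, offset, &desc);
 *
 * On return, desc.array_len holds the decoded element count.
 */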
int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_SHIFT;
		thislen = PAGE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);

/**
 * xdr_stream_decode_opaque - Decode variable length opaque
 * @xdr: pointer to xdr_stream
 * @ptr: location to store opaque data
 * @size: size of storage buffer @ptr
 *
 * Return values:
 *   On success, returns size of object stored in *@ptr
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE on overflow of storage buffer @ptr
 */
ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, size_t size)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
	if (ret <= 0)
		return ret;
	memcpy(ptr, p, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque);

/**
 * xdr_stream_decode_opaque_dup - Decode and duplicate variable length opaque
 * @xdr: pointer to xdr_stream
 * @ptr: location to store pointer to opaque data
 * @maxlen: maximum acceptable object size
 * @gfp_flags: GFP mask to use
 *
 * Return values:
 *   On success, returns size of object stored in *@ptr
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE if the size of the object would exceed @maxlen
 *   %-ENOMEM on memory allocation failure
 */
ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr,
		size_t maxlen, gfp_t gfp_flags)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
	if (ret > 0) {
		*ptr = kmemdup(p, ret, gfp_flags);
		if (*ptr != NULL)
			return ret;
		ret = -ENOMEM;
	}
	*ptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_dup);
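/*
 * Illustrative sketch: the two opaque decoders differ only in storage.
 * xdr_stream_decode_opaque() copies into a caller-supplied buffer,
 * while the _dup variant allocates one:
 *
 *	u8 session_id[16];
 *	void *owner;
 *	ssize_t ret;
 *
 *	ret = xdr_stream_decode_opaque(xdr, session_id, sizeof(session_id));
 *	if (ret < 0)
 *		return ret;
 *	ret = xdr_stream_decode_opaque_dup(xdr, &owner, 1024, GFP_KERNEL);
 *
 * "session_id" and "owner" are hypothetical fields; the caller owns,
 * and must eventually kfree(), the duplicated buffer.
 */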
/**
 * xdr_stream_decode_string - Decode variable length string
 * @xdr: pointer to xdr_stream
 * @str: location to store string
 * @size: size of storage buffer @str
 *
 * Return values:
 *   On success, returns length of NUL-terminated string stored in *@str
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE on overflow of storage buffer @str
 */
ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, size_t size)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
	if (ret > 0) {
		memcpy(str, p, ret);
		str[ret] = '\0';
		return strlen(str);
	}
	*str = '\0';
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_string);

/**
 * xdr_stream_decode_string_dup - Decode and duplicate variable length string
 * @xdr: pointer to xdr_stream
 * @str: location to store pointer to string
 * @maxlen: maximum acceptable string length
 * @gfp_flags: GFP mask to use
 *
 * Return values:
 *   On success, returns length of NUL-terminated string stored in *@str
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE if the size of the string would exceed @maxlen
 *   %-ENOMEM on memory allocation failure
 */
ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
		size_t maxlen, gfp_t gfp_flags)
{
	void *p;
	ssize_t ret;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
	if (ret > 0) {
		char *s = kmalloc(ret + 1, gfp_flags);
		if (s != NULL) {
			memcpy(s, p, ret);
			s[ret] = '\0';
			*str = s;
			return strlen(s);
		}
		ret = -ENOMEM;
	}
	*str = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup);
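/*
 * Illustrative sketch (hypothetical mount-path decoder): duplicate a
 * string whose lifetime must outlast the receive buffer:
 *
 *	char *path;
 *	ssize_t len;
 *
 *	len = xdr_stream_decode_string_dup(xdr, &path, PATH_MAX, GFP_KERNEL);
 *	if (len < 0)
 *		return len;
 *	...
 *	kfree(path);
 *
 * On failure *str is set to NULL, so no cleanup is needed on the error
 * path.
 */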