/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = cpu_to_be32(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + quadlen;
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int len;

	len = be32_to_cpu(*p++);
	if (len > XDR_MAX_NETOBJ)
		return NULL;
	obj->len = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 *
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
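/*
 * Usage sketch (illustrative only, not part of the original file): encoding
 * a 5-byte cookie with xdr_encode_opaque_fixed(). XDR_QUADLEN(5) is 2, so
 * the helper writes the 5 data bytes plus 3 zero pad bytes and advances p
 * by two 32-bit words. The function name and the caller-supplied 'p' are
 * hypothetical.
 */
static __be32 * __maybe_unused example_encode_cookie(__be32 *p)
{
	static const u8 cookie[5] = { 0xde, 0xad, 0xbe, 0xef, 0x01 };

	return xdr_encode_opaque_fixed(p, cookie, sizeof(cookie));
}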
/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = cpu_to_be32(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);

__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);

__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len;

	len = be32_to_cpu(*p++);
	if (len > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);

/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 *
 */
void
xdr_terminate_string(struct xdr_buf *buf, const u32 len)
{
	char *kaddr;

	kaddr = kmap_atomic(buf->pages[0]);
	kaddr[buf->page_base + len] = '\0';
	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);
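/*
 * Usage sketch (illustrative, not part of the original file): a variable
 * length opaque is its 4-byte length followed by the padded bytes, so the
 * 9-byte string below consumes 1 + XDR_QUADLEN(9) == 4 words in total.
 * The function name and the caller-supplied 'p' are hypothetical;
 * xdr_encode_string() above boils down to the same call for a
 * NUL-terminated string.
 */
static __be32 * __maybe_unused example_encode_owner(__be32 *p)
{
	static const char owner[] = "lockowner";	/* 9 bytes + 3 pad */

	return xdr_encode_opaque(p, owner, sizeof(owner) - 1);
}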
/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 */

/**
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	      they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto);
		vfrom = kmap_atomic(*pgfrom);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vfrom);
		kunmap_atomic(vto);

	} while ((len -= copy) != 0);
}

/**
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	for (;;) {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	flush_dcache_page(*pgto);
}

/**
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);
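/*
 * Usage sketch (illustrative, not part of the original file): pulling 12
 * bytes that start 100 bytes into a page array out into a flat buffer.
 * 'pages' and the offsets are hypothetical, and the pages are assumed to
 * hold at least 112 bytes; _copy_from_pages() handles any page-crossing
 * arithmetic internally.
 */
static void __maybe_unused example_flatten(struct page **pages)
{
	char out[12];

	_copy_from_pages(out, pages, 100, sizeof(out));
}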
/**
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;

	WARN_ON_ONCE(len > head->iov_len);
	if (len > head->iov_len)
		len = head->iov_len;

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

/**
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	unsigned int pglen = buf->page_len;
	unsigned int tailbuf_len;

	tail = buf->tail;
	BUG_ON(len > pglen);

	tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;

	/* Shift the tail first */
	if (tailbuf_len != 0) {
		unsigned int free_space = tailbuf_len - tail->iov_len;

		if (len < free_space)
			free_space = len;
		tail->iov_len += free_space;

		copy = len;
		if (tail->iov_len > len) {
			char *p = (char *)tail->iov_base + len;
			memmove(p, tail->iov_base, tail->iov_len - len);
		} else
			copy = tail->iov_len;
		/* Copy from the inlined pages into the tail */
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);

/**
 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
{
	return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
}
EXPORT_SYMBOL_GPL(xdr_stream_pos);
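/*
 * Worked example (illustrative, not part of the original file): with
 * buf->len == 20 bytes and xdr->nwords == 2 words still to decode,
 * xdr_stream_pos() returns (XDR_QUADLEN(20) - 2) << 2 == (5 - 2) * 4 == 12,
 * i.e. twelve bytes have been consumed so far.
 */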
/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_encode);

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);

static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
		unsigned int len)
{
	if (len > iov->iov_len)
		len = iov->iov_len;
	xdr->p = (__be32*)iov->iov_base;
	xdr->end = (__be32*)(iov->iov_base + len);
	xdr->iov = iov;
	xdr->page_ptr = NULL;
}
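/*
 * Usage sketch (illustrative, not part of the original file): the typical
 * encode-side pattern. Reserve space first, check for NULL, then fill in
 * the reserved words; the stream keeps buf->len and the head kvec length
 * in sync. The function name is hypothetical and 'buf' is assumed to have
 * its head kvec and buflen already set up by the caller.
 */
static int __maybe_unused example_encode(struct xdr_stream *xdr,
					 struct xdr_buf *buf, u32 value)
{
	__be32 *p;

	xdr_init_encode(xdr, buf, NULL);
	p = xdr_reserve_space(xdr, 4);		/* one 32-bit word */
	if (p == NULL)
		return -EMSGSIZE;		/* no room left in the head */
	*p = cpu_to_be32(value);
	return 0;
}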
static int xdr_set_page_base(struct xdr_stream *xdr,
		unsigned int base, unsigned int len)
{
	unsigned int pgnr;
	unsigned int maxlen;
	unsigned int pgoff;
	unsigned int pgend;
	void *kaddr;

	maxlen = xdr->buf->page_len;
	if (base >= maxlen)
		return -EINVAL;
	maxlen -= base;
	if (len > maxlen)
		len = maxlen;

	base += xdr->buf->page_base;

	pgnr = base >> PAGE_SHIFT;
	xdr->page_ptr = &xdr->buf->pages[pgnr];
	kaddr = page_address(*xdr->page_ptr);

	pgoff = base & ~PAGE_MASK;
	xdr->p = (__be32*)(kaddr + pgoff);

	pgend = pgoff + len;
	if (pgend > PAGE_SIZE)
		pgend = PAGE_SIZE;
	xdr->end = (__be32*)(kaddr + pgend);
	xdr->iov = NULL;
	return 0;
}

static void xdr_set_next_page(struct xdr_stream *xdr)
{
	unsigned int newbase;

	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
	newbase -= xdr->buf->page_base;

	if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
		xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
}

static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
	if (xdr->page_ptr != NULL)
		xdr_set_next_page(xdr);
	else if (xdr->iov == xdr->buf->head) {
		if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
			xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
	}
	return xdr->p != xdr->end;
}

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	xdr->buf = buf;
	xdr->scratch.iov_base = NULL;
	xdr->scratch.iov_len = 0;
	xdr->nwords = XDR_QUADLEN(buf->len);
	if (buf->head[0].iov_len != 0)
		xdr_set_iov(xdr, buf->head, buf->len);
	else if (buf->page_len != 0)
		xdr_set_page_base(xdr, 0, buf->len);
	if (p != NULL && p > xdr->p && xdr->end >= p) {
		xdr->nwords -= p - xdr->p;
		xdr->p = p;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_decode);

/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @pages: list of pages to decode into
 * @len: length in bytes of buffer in pages
 */
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
			   struct page **pages, unsigned int len)
{
	memset(buf, 0, sizeof(*buf));
	buf->pages = pages;
	buf->page_len = len;
	buf->buflen = len;
	buf->len = len;
	xdr_init_decode(xdr, buf, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);

static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	unsigned int nwords = XDR_QUADLEN(nbytes);
	__be32 *p = xdr->p;
	__be32 *q = p + nwords;

	if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->nwords -= nwords;
	return p;
}
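/*
 * Usage sketch (illustrative, not part of the original file): decoding one
 * word from a reply whose head kvec has already been filled in. The helper
 * name and the -EIO choice are hypothetical; xdr_inline_decode() is
 * declared in <linux/sunrpc/xdr.h> and defined further below.
 */
static int __maybe_unused example_decode_status(struct xdr_stream *xdr,
						struct xdr_buf *rcvbuf)
{
	__be32 *p;

	xdr_init_decode(xdr, rcvbuf, NULL);
	p = xdr_inline_decode(xdr, 4);
	if (p == NULL)
		return -EIO;		/* reply shorter than one word */
	return be32_to_cpup(p);
}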
674 */ 675 void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen) 676 { 677 xdr->scratch.iov_base = buf; 678 xdr->scratch.iov_len = buflen; 679 } 680 EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer); 681 682 static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes) 683 { 684 __be32 *p; 685 void *cpdest = xdr->scratch.iov_base; 686 size_t cplen = (char *)xdr->end - (char *)xdr->p; 687 688 if (nbytes > xdr->scratch.iov_len) 689 return NULL; 690 memcpy(cpdest, xdr->p, cplen); 691 cpdest += cplen; 692 nbytes -= cplen; 693 if (!xdr_set_next_buffer(xdr)) 694 return NULL; 695 p = __xdr_inline_decode(xdr, nbytes); 696 if (p == NULL) 697 return NULL; 698 memcpy(cpdest, p, nbytes); 699 return xdr->scratch.iov_base; 700 } 701 702 /** 703 * xdr_inline_decode - Retrieve XDR data to decode 704 * @xdr: pointer to xdr_stream struct 705 * @nbytes: number of bytes of data to decode 706 * 707 * Check if the input buffer is long enough to enable us to decode 708 * 'nbytes' more bytes of data starting at the current position. 709 * If so return the current pointer, then update the current 710 * pointer position. 711 */ 712 __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes) 713 { 714 __be32 *p; 715 716 if (nbytes == 0) 717 return xdr->p; 718 if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr)) 719 return NULL; 720 p = __xdr_inline_decode(xdr, nbytes); 721 if (p != NULL) 722 return p; 723 return xdr_copy_to_scratch(xdr, nbytes); 724 } 725 EXPORT_SYMBOL_GPL(xdr_inline_decode); 726 727 static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len) 728 { 729 struct xdr_buf *buf = xdr->buf; 730 struct kvec *iov; 731 unsigned int nwords = XDR_QUADLEN(len); 732 unsigned int cur = xdr_stream_pos(xdr); 733 734 if (xdr->nwords == 0) 735 return 0; 736 /* Realign pages to current pointer position */ 737 iov = buf->head; 738 if (iov->iov_len > cur) { 739 xdr_shrink_bufhead(buf, iov->iov_len - cur); 740 xdr->nwords = XDR_QUADLEN(buf->len - cur); 741 } 742 743 if (nwords > xdr->nwords) { 744 nwords = xdr->nwords; 745 len = nwords << 2; 746 } 747 if (buf->page_len <= len) 748 len = buf->page_len; 749 else if (nwords < xdr->nwords) { 750 /* Truncate page data and move it into the tail */ 751 xdr_shrink_pagelen(buf, buf->page_len - len); 752 xdr->nwords = XDR_QUADLEN(buf->len - cur); 753 } 754 return len; 755 } 756 757 /** 758 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position 759 * @xdr: pointer to xdr_stream struct 760 * @len: number of bytes of page data 761 * 762 * Moves data beyond the current pointer position from the XDR head[] buffer 763 * into the page list. Any data that lies beyond current position + "len" 764 * bytes is moved into the XDR tail[]. 765 * 766 * Returns the number of XDR encoded bytes now contained in the pages 767 */ 768 unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len) 769 { 770 struct xdr_buf *buf = xdr->buf; 771 struct kvec *iov; 772 unsigned int nwords; 773 unsigned int end; 774 unsigned int padding; 775 776 len = xdr_align_pages(xdr, len); 777 if (len == 0) 778 return 0; 779 nwords = XDR_QUADLEN(len); 780 padding = (nwords << 2) - len; 781 xdr->iov = iov = buf->tail; 782 /* Compute remaining message length. */ 783 end = ((xdr->nwords - nwords) << 2) + padding; 784 if (end > iov->iov_len) 785 end = iov->iov_len; 786 787 /* 788 * Position current pointer at beginning of tail, and 789 * set remaining message length. 
790 */ 791 xdr->p = (__be32 *)((char *)iov->iov_base + padding); 792 xdr->end = (__be32 *)((char *)iov->iov_base + end); 793 xdr->page_ptr = NULL; 794 xdr->nwords = XDR_QUADLEN(end - padding); 795 return len; 796 } 797 EXPORT_SYMBOL_GPL(xdr_read_pages); 798 799 /** 800 * xdr_enter_page - decode data from the XDR page 801 * @xdr: pointer to xdr_stream struct 802 * @len: number of bytes of page data 803 * 804 * Moves data beyond the current pointer position from the XDR head[] buffer 805 * into the page list. Any data that lies beyond current position + "len" 806 * bytes is moved into the XDR tail[]. The current pointer is then 807 * repositioned at the beginning of the first XDR page. 808 */ 809 void xdr_enter_page(struct xdr_stream *xdr, unsigned int len) 810 { 811 len = xdr_align_pages(xdr, len); 812 /* 813 * Position current pointer at beginning of tail, and 814 * set remaining message length. 815 */ 816 if (len != 0) 817 xdr_set_page_base(xdr, 0, len); 818 } 819 EXPORT_SYMBOL_GPL(xdr_enter_page); 820 821 static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0}; 822 823 void 824 xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf) 825 { 826 buf->head[0] = *iov; 827 buf->tail[0] = empty_iov; 828 buf->page_len = 0; 829 buf->buflen = buf->len = iov->iov_len; 830 } 831 EXPORT_SYMBOL_GPL(xdr_buf_from_iov); 832 833 /* Sets subbuf to the portion of buf of length len beginning base bytes 834 * from the start of buf. Returns -1 if base of length are out of bounds. */ 835 int 836 xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, 837 unsigned int base, unsigned int len) 838 { 839 subbuf->buflen = subbuf->len = len; 840 if (base < buf->head[0].iov_len) { 841 subbuf->head[0].iov_base = buf->head[0].iov_base + base; 842 subbuf->head[0].iov_len = min_t(unsigned int, len, 843 buf->head[0].iov_len - base); 844 len -= subbuf->head[0].iov_len; 845 base = 0; 846 } else { 847 subbuf->head[0].iov_base = NULL; 848 subbuf->head[0].iov_len = 0; 849 base -= buf->head[0].iov_len; 850 } 851 852 if (base < buf->page_len) { 853 subbuf->page_len = min(buf->page_len - base, len); 854 base += buf->page_base; 855 subbuf->page_base = base & ~PAGE_CACHE_MASK; 856 subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT]; 857 len -= subbuf->page_len; 858 base = 0; 859 } else { 860 base -= buf->page_len; 861 subbuf->page_len = 0; 862 } 863 864 if (base < buf->tail[0].iov_len) { 865 subbuf->tail[0].iov_base = buf->tail[0].iov_base + base; 866 subbuf->tail[0].iov_len = min_t(unsigned int, len, 867 buf->tail[0].iov_len - base); 868 len -= subbuf->tail[0].iov_len; 869 base = 0; 870 } else { 871 subbuf->tail[0].iov_base = NULL; 872 subbuf->tail[0].iov_len = 0; 873 base -= buf->tail[0].iov_len; 874 } 875 876 if (base || len) 877 return -1; 878 return 0; 879 } 880 EXPORT_SYMBOL_GPL(xdr_buf_subsegment); 881 882 static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len) 883 { 884 unsigned int this_len; 885 886 this_len = min_t(unsigned int, len, subbuf->head[0].iov_len); 887 memcpy(obj, subbuf->head[0].iov_base, this_len); 888 len -= this_len; 889 obj += this_len; 890 this_len = min_t(unsigned int, len, subbuf->page_len); 891 if (this_len) 892 _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len); 893 len -= this_len; 894 obj += this_len; 895 this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len); 896 memcpy(obj, subbuf->tail[0].iov_base, this_len); 897 } 898 899 /* obj is assumed to point to allocated memory of size at least len: */ 900 int 
/* obj is assumed to point to allocated memory of size at least len: */
int
read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);

static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);

int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32 raw;
	int status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = be32_to_cpu(raw);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32 raw = cpu_to_be32(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);

/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * tail.) */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);
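/*
 * Usage sketch (illustrative, not part of the original file): patching a
 * 32-bit count at byte offset 12 of a buffer and reading it back. Both
 * helpers do the byte-order conversion; the offset may land in the head,
 * the pages, or the tail. The function name is hypothetical.
 */
static int __maybe_unused example_word_roundtrip(struct xdr_buf *buf)
{
	u32 count;
	int err;

	err = xdr_encode_word(buf, 12, 42);
	if (err)
		return err;
	err = xdr_decode_word(buf, 12, &count);
	if (err)
		return err;
	return count == 42 ? 0 : -EIO;
}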
/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
					 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);

int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
		thislen = PAGE_CACHE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_CACHE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
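/*
 * Usage sketch (illustrative, not part of the original file): a trivial
 * actor for xdr_process_buf() that just sums the segment lengths it is
 * handed, e.g. via xdr_process_buf(buf, 0, buf->len, example_count_actor,
 * &total). Real callers (such as checksumming code) would process each
 * scatterlist entry instead. The names here are hypothetical.
 */
static int __maybe_unused example_count_actor(struct scatterlist *sg, void *data)
{
	unsigned int *total = data;

	*total += sg->length;
	return 0;	/* a non-zero return would abort the walk */
}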