/******************************************************************************
 * grant_table.c
 *
 * Granting foreign access to our memory reservation.
 *
 * Copyright (c) 2005-2006, Christopher Clark
 * Copyright (c) 2004-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/workqueue.h>
#include <linux/ratelimit.h>
#include <linux/moduleparam.h>
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
#include <linux/dma-mapping.h>
#endif

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
#include <xen/balloon.h>
#ifdef CONFIG_X86
#include <asm/xen/cpuid.h>
#endif
#include <xen/mem-reservation.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

#include <asm/sync_bitops.h>

/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
#define GNTTAB_LIST_END 0xffffffff

static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
static int gnttab_free_count;
static grant_ref_t gnttab_free_head;
static DEFINE_SPINLOCK(gnttab_list_lock);
struct grant_frames xen_auto_xlat_grant_frames;
static unsigned int xen_gnttab_version;
module_param_named(version, xen_gnttab_version, uint, 0);

static union {
	struct grant_entry_v1 *v1;
	union grant_entry_v2 *v2;
	void *addr;
} gnttab_shared;

/* This is a structure of function pointers for the grant table. */
struct gnttab_ops {
	/*
	 * Version of the grant interface.
	 */
	unsigned int version;
	/*
	 * Grant refs per grant frame.
	 */
	unsigned int grefs_per_grant_frame;
	/*
	 * Map a list of frames for storing grant entries. The frames
	 * parameter holds the grant table frame addresses while the grant
	 * table is being set up, and nr_gframes is the number of frames to
	 * map. Returns GNTST_okay on success and a negative value on
	 * failure.
	 */
	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
	/*
	 * Release the frames which were mapped in map_frames for the grant
	 * entry status.
	 */
	void (*unmap_frames)(void);
	/*
	 * Introduce a valid entry into the grant table, granting the frame
	 * of this grant entry to a domain for accessing or transferring.
	 * The ref parameter is the reference of the introduced grant entry,
	 * domid is the id of the granted domain, frame is the page frame to
	 * be granted, and flags is the status to set on the grant entry.
	 */
	void (*update_entry)(grant_ref_t ref, domid_t domid,
			     unsigned long frame, unsigned flags);
	/*
	 * Stop granting a grant entry to a domain for accessing. The ref
	 * parameter is the reference of the grant entry whose access will
	 * be stopped; readonly is not used by this function. If the grant
	 * entry is currently mapped for reading or writing, return failure
	 * (== 0) without tearing down the grant access. Otherwise, stop
	 * grant access for this entry and return success (== 1).
	 */
	int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
	/*
	 * Stop granting a grant entry to a domain for transfer. The ref
	 * parameter is the reference of the grant entry whose transfer will
	 * be stopped. If the transfer has not started, just reclaim the
	 * grant entry and return failure (== 0). Otherwise, wait for the
	 * transfer to complete and then return the frame.
	 */
	unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
	/*
	 * Read the frame number related to a given grant reference.
	 */
	unsigned long (*read_frame)(grant_ref_t ref);
};

struct unmap_refs_callback_data {
	struct completion completion;
	int result;
};

static const struct gnttab_ops *gnttab_interface;

/* This reflects the status of grant entries, so it acts as a global value. */
static grant_status_t *grstatus;

static struct gnttab_free_callback *gnttab_free_callback_list;

static int gnttab_expand(unsigned int req_entries);

#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
#define SPP (PAGE_SIZE / sizeof(grant_status_t))

static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
	return &gnttab_list[(entry) / RPP][(entry) % RPP];
}
/* This can be used as an l-value */
#define gnttab_entry(entry) (*__gnttab_entry(entry))

static int get_free_entries(unsigned count)
{
	unsigned long flags;
	int ref, rc = 0;
	grant_ref_t head;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if ((gnttab_free_count < count) &&
	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		return rc;
	}

	ref = head = gnttab_free_head;
	gnttab_free_count -= count;
	while (count-- > 1)
		head = gnttab_entry(head);
	gnttab_free_head = gnttab_entry(head);
	gnttab_entry(head) = GNTTAB_LIST_END;

	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ref;
}

static void do_free_callbacks(void)
{
	struct gnttab_free_callback *callback, *next;

	callback = gnttab_free_callback_list;
	gnttab_free_callback_list = NULL;

	while (callback != NULL) {
		next = callback->next;
		if (gnttab_free_count >= callback->count) {
			callback->next = NULL;
			callback->fn(callback->arg);
		} else {
			callback->next = gnttab_free_callback_list;
			gnttab_free_callback_list = callback;
		}
		callback = next;
	}
}

static inline void check_free_callbacks(void)
{
	if (unlikely(gnttab_free_callback_list))
		do_free_callbacks();
}

static void put_free_entry(grant_ref_t ref)
{
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = ref;
	gnttab_free_count++;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

/*
 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
 * Introducing a valid entry into the grant table:
 *  1. Write ent->domid.
 *  2. Write ent->frame:
 *      GTF_permit_access:   Frame to which access is permitted.
 *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
 *                           frame, or zero if none.
 *  3. Write memory barrier (WMB).
 *  4. Write ent->flags, inc. valid type.
 */
static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v1[ref].domid = domid;
	gnttab_shared.v1[ref].frame = frame;
	wmb();
	gnttab_shared.v1[ref].flags = flags;
}

static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned int flags)
{
	gnttab_shared.v2[ref].hdr.domid = domid;
	gnttab_shared.v2[ref].full_page.frame = frame;
	wmb();	/* Hypervisor concurrent accesses. */
	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
}

/*
 * Public grant-issuing interface functions
 */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly)
{
	gnttab_interface->update_entry(ref, domid, frame,
			   GTF_permit_access | (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
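
/*
 * Illustrative sketch (not part of this file's API documentation): a typical
 * frontend grants one of its own pages to a backend domain and later revokes
 * the grant.  The page, backend_domid and error handling below are
 * assumptions made up for this example.
 *
 *	void *page = (void *)__get_free_page(GFP_KERNEL);
 *	int ref;
 *
 *	ref = gnttab_grant_foreign_access(backend_domid, virt_to_gfn(page), 0);
 *	if (ref < 0)
 *		return ref;	(no free grant entries: -ENOSPC)
 *	... advertise ref to the backend, e.g. via xenstore ...
 *	... once the backend is done with the page ...
 *	gnttab_end_foreign_access(ref, 0, (unsigned long)page);
 *
 * Passing the page to gnttab_end_foreign_access() hands ownership of it to
 * the grant table code, which frees it immediately or, if the backend still
 * has it mapped, defers the free until the grant is no longer in use.
 */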

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;

	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);

static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
	u16 flags, nflags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;
	nflags = *pflags;
	do {
		flags = nflags;
		if (flags & (GTF_reading|GTF_writing))
			return 0;
	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);

	return 1;
}

static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
{
	gnttab_shared.v2[ref].hdr.flags = 0;
	mb();	/* Concurrent access by hypervisor. */
	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
		return 0;
	} else {
		/*
		 * The read of grstatus needs to have acquire semantics.
		 * On x86, reads already have that, and we just need to
		 * protect against compiler reorderings.
		 * On other architectures we may need a full barrier.
		 */
#ifdef CONFIG_X86
		barrier();
#else
		mb();
#endif
	}

	return 1;
}

static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	return gnttab_interface->end_foreign_access_ref(ref, readonly);
}

int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	if (_gnttab_end_foreign_access_ref(ref, readonly))
		return 1;
	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);

static unsigned long gnttab_read_frame_v1(grant_ref_t ref)
{
	return gnttab_shared.v1[ref].frame;
}

static unsigned long gnttab_read_frame_v2(grant_ref_t ref)
{
	return gnttab_shared.v2[ref].full_page.frame;
}

struct deferred_entry {
	struct list_head list;
	grant_ref_t ref;
	bool ro;
	uint16_t warn_delay;
	struct page *page;
};
static LIST_HEAD(deferred_list);
static void gnttab_handle_deferred(struct timer_list *);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);

static void gnttab_handle_deferred(struct timer_list *unused)
{
	unsigned int nr = 10;
	struct deferred_entry *first = NULL;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (nr--) {
		struct deferred_entry *entry
			= list_first_entry(&deferred_list,
					   struct deferred_entry, list);

		if (entry == first)
			break;
		list_del(&entry->list);
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
			put_free_entry(entry->ref);
			pr_debug("freeing g.e. %#x (pfn %#lx)\n",
				 entry->ref, page_to_pfn(entry->page));
			put_page(entry->page);
			kfree(entry);
			entry = NULL;
		} else {
			if (!--entry->warn_delay)
				pr_info("g.e. %#x still pending\n", entry->ref);
			if (!first)
				first = entry;
		}
		spin_lock_irqsave(&gnttab_list_lock, flags);
		if (entry)
			list_add_tail(&entry->list, &deferred_list);
		else if (list_empty(&deferred_list))
			break;
	}
	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
		deferred_timer.expires = jiffies + HZ;
		add_timer(&deferred_timer);
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
				struct page *page)
{
	struct deferred_entry *entry;
	gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
	const char *what = KERN_WARNING "leaking";

	entry = kmalloc(sizeof(*entry), gfp);
	if (!page) {
		unsigned long gfn = gnttab_interface->read_frame(ref);

		page = pfn_to_page(gfn_to_pfn(gfn));
		get_page(page);
	}

	if (entry) {
		unsigned long flags;

		entry->ref = ref;
		entry->ro = readonly;
		entry->page = page;
		entry->warn_delay = 60;
		spin_lock_irqsave(&gnttab_list_lock, flags);
		list_add_tail(&entry->list, &deferred_list);
		if (!timer_pending(&deferred_timer)) {
			deferred_timer.expires = jiffies + HZ;
			add_timer(&deferred_timer);
		}
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		what = KERN_DEBUG "deferring";
	}
	printk("%s g.e. %#x (pfn %#lx)\n",
	       what, ref, page ? page_to_pfn(page) : -1);
}

int gnttab_try_end_foreign_access(grant_ref_t ref)
{
	int ret = _gnttab_end_foreign_access_ref(ref, 0);

	if (ret)
		put_free_entry(ref);

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);

void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
			       unsigned long page)
{
	if (gnttab_try_end_foreign_access(ref)) {
		if (page != 0)
			put_page(virt_to_page(page));
	} else
		gnttab_add_deferred(ref, readonly,
				    page ? virt_to_page(page) : NULL);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);

int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;
	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);

void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
				       unsigned long pfn)
{
	gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);

static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
{
	unsigned long frame;
	u16 flags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;

	/*
	 * If a transfer has not yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v1[ref].frame;
	BUG_ON(frame == 0);

	return frame;
}

static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
{
	unsigned long frame;
	u16 flags;
	u16 *pflags;

	pflags = &gnttab_shared.v2[ref].hdr.flags;

	/*
	 * If a transfer has not yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v2[ref].full_page.frame;
	BUG_ON(frame == 0);

	return frame;
}

unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
	return gnttab_interface->end_foreign_transfer_ref(ref);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);

unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
{
	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);

	put_free_entry(ref);
	return frame;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);

void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);

void gnttab_free_grant_references(grant_ref_t head)
{
	grant_ref_t ref;
	unsigned long flags;
	int count = 1;

	if (head == GNTTAB_LIST_END)
		return;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	ref = head;
	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
		ref = gnttab_entry(ref);
		count++;
	}
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = head;
	gnttab_free_count += count;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);

int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
{
	int h = get_free_entries(count);

	if (h < 0)
		return -ENOSPC;

	*head = h;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);

int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
	return (*private_head == GNTTAB_LIST_END);
}
EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);

int gnttab_claim_grant_reference(grant_ref_t *private_head)
{
	grant_ref_t g = *private_head;

	if (unlikely(g == GNTTAB_LIST_END))
		return -ENOSPC;
	*private_head = gnttab_entry(g);
	return g;
}
EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);

void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release)
{
	gnttab_entry(release) = *private_head;
	*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);

void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count)
{
	unsigned long flags;
	struct gnttab_free_callback *cb;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	/* Check if the callback is already on the list */
	cb = gnttab_free_callback_list;
	while (cb) {
		if (cb == callback)
			goto out;
		cb = cb->next;
	}

	callback->fn = fn;
	callback->arg = arg;
	callback->count = count;
	callback->next = gnttab_free_callback_list;
	gnttab_free_callback_list = callback;
	check_free_callbacks();
out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
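
/*
 * Illustrative sketch of how a frontend typically combines the reference
 * pool interfaces above.  RING_SIZE, backend_domid, pages[], my_retry_fn and
 * my_arg are assumptions made up for this example and are not defined in
 * this file.
 *
 *	static struct gnttab_free_callback cb;
 *	grant_ref_t head, ref;
 *	int i, err;
 *
 *	err = gnttab_alloc_grant_references(RING_SIZE, &head);
 *	if (err < 0) {
 *		(not enough free entries: ask to be called back once
 *		 RING_SIZE references become available)
 *		gnttab_request_free_callback(&cb, my_retry_fn, my_arg,
 *					     RING_SIZE);
 *		return err;
 *	}
 *	for (i = 0; i < RING_SIZE; i++) {
 *		ref = gnttab_claim_grant_reference(&head);
 *		gnttab_grant_foreign_access_ref(ref, backend_domid,
 *						xen_page_to_gfn(pages[i]), 0);
 *	}
 *	...
 *	gnttab_free_grant_references(head);
 */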

void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
{
	struct gnttab_free_callback **pcb;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
		if (*pcb == callback) {
			*pcb = callback->next;
			break;
		}
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);

static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
{
	return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
	       align;
}

static int grow_gnttab_list(unsigned int more_frames)
{
	unsigned int new_nr_grant_frames, extra_entries, i;
	unsigned int nr_glist_frames, new_nr_glist_frames;
	unsigned int grefs_per_frame;

	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;

	new_nr_grant_frames = nr_grant_frames + more_frames;
	extra_entries = more_frames * grefs_per_frame;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
		if (!gnttab_list[i])
			goto grow_nomem;
	}

	for (i = grefs_per_frame * nr_grant_frames;
	     i < grefs_per_frame * new_nr_grant_frames - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(i) = gnttab_free_head;
	gnttab_free_head = grefs_per_frame * nr_grant_frames;
	gnttab_free_count += extra_entries;

	nr_grant_frames = new_nr_grant_frames;

	check_free_callbacks();

	return 0;

grow_nomem:
	while (i-- > nr_glist_frames)
		free_page((unsigned long) gnttab_list[i]);
	return -ENOMEM;
}

static unsigned int __max_nr_grant_frames(void)
{
	struct gnttab_query_size query;
	int rc;

	query.dom = DOMID_SELF;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
	if ((rc < 0) || (query.status != GNTST_okay))
		return 4; /* Legacy max supported number of frames */

	return query.max_nr_frames;
}

unsigned int gnttab_max_grant_frames(void)
{
	unsigned int xen_max = __max_nr_grant_frames();
	static unsigned int boot_max_nr_grant_frames;

	/* First time, initialize it properly. */
	if (!boot_max_nr_grant_frames)
		boot_max_nr_grant_frames = __max_nr_grant_frames();

	if (xen_max > boot_max_nr_grant_frames)
		return boot_max_nr_grant_frames;
	return xen_max;
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);

int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
{
	xen_pfn_t *pfn;
	unsigned int max_nr_gframes = __max_nr_grant_frames();
	unsigned int i;
	void *vaddr;

	if (xen_auto_xlat_grant_frames.count)
		return -EINVAL;

	vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
	if (vaddr == NULL) {
		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
			&addr);
		return -ENOMEM;
	}
	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
	if (!pfn) {
		xen_unmap(vaddr);
		return -ENOMEM;
	}
	for (i = 0; i < max_nr_gframes; i++)
		pfn[i] = XEN_PFN_DOWN(addr) + i;

	xen_auto_xlat_grant_frames.vaddr = vaddr;
	xen_auto_xlat_grant_frames.pfn = pfn;
	xen_auto_xlat_grant_frames.count = max_nr_gframes;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);

void gnttab_free_auto_xlat_frames(void)
{
	if (!xen_auto_xlat_grant_frames.count)
		return;
	kfree(xen_auto_xlat_grant_frames.pfn);
	xen_unmap(xen_auto_xlat_grant_frames.vaddr);

	xen_auto_xlat_grant_frames.pfn = NULL;
	xen_auto_xlat_grant_frames.count = 0;
	xen_auto_xlat_grant_frames.vaddr = NULL;
}
EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);

int gnttab_pages_set_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
#if BITS_PER_LONG < 64
		struct xen_page_foreign *foreign;

		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
		if (!foreign)
			return -ENOMEM;

		set_page_private(pages[i], (unsigned long)foreign);
#endif
		SetPagePrivate(pages[i]);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_pages_set_private);

/**
 * gnttab_alloc_pages - alloc pages suitable for grant mapping into
 * @nr_pages: number of pages to alloc
 * @pages: returns the pages
 */
int gnttab_alloc_pages(int nr_pages, struct page **pages)
{
	int ret;

	ret = xen_alloc_unpopulated_pages(nr_pages, pages);
	if (ret < 0)
		return ret;

	ret = gnttab_pages_set_private(nr_pages, pages);
	if (ret < 0)
		gnttab_free_pages(nr_pages, pages);

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_pages);

#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
static inline void cache_init(struct gnttab_page_cache *cache)
{
	cache->pages = NULL;
}

static inline bool cache_empty(struct gnttab_page_cache *cache)
{
	return !cache->pages;
}

static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
	struct page *page;

	page = cache->pages;
	cache->pages = page->zone_device_data;

	return page;
}

static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
	page->zone_device_data = cache->pages;
	cache->pages = page;
}
#else
static inline void cache_init(struct gnttab_page_cache *cache)
{
	INIT_LIST_HEAD(&cache->pages);
}

static inline bool cache_empty(struct gnttab_page_cache *cache)
{
	return list_empty(&cache->pages);
}

static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
	struct page *page;

	page = list_first_entry(&cache->pages, struct page, lru);
	list_del(&page->lru);

	return page;
}

static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
	list_add(&page->lru, &cache->pages);
}
#endif
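
/*
 * Illustrative sketch of the page cache helpers below; my_cache and the
 * surrounding error handling are assumptions made up for this example.
 * A backend that repeatedly grant-maps foreign pages can keep a small pool
 * instead of going through the unpopulated-page allocator on every request:
 *
 *	static struct gnttab_page_cache my_cache;
 *	struct page *page;
 *	int err;
 *
 *	gnttab_page_cache_init(&my_cache);
 *	err = gnttab_page_cache_get(&my_cache, &page);
 *	if (err)
 *		return err;
 *	... use the page as a grant-mapping target ...
 *	gnttab_page_cache_put(&my_cache, &page, 1);
 *	gnttab_page_cache_shrink(&my_cache, 16);
 */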

void gnttab_page_cache_init(struct gnttab_page_cache *cache)
{
	spin_lock_init(&cache->lock);
	cache_init(cache);
	cache->num_pages = 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_init);

int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);

	if (cache_empty(cache)) {
		spin_unlock_irqrestore(&cache->lock, flags);
		return gnttab_alloc_pages(1, page);
	}

	page[0] = cache_deq(cache);
	cache->num_pages--;

	spin_unlock_irqrestore(&cache->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_get);

void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
			   unsigned int num)
{
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&cache->lock, flags);

	for (i = 0; i < num; i++)
		cache_enq(cache, page[i]);
	cache->num_pages += num;

	spin_unlock_irqrestore(&cache->lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_put);

void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num)
{
	struct page *page[10];
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);

	while (cache->num_pages > num) {
		page[i] = cache_deq(cache);
		cache->num_pages--;
		if (++i == ARRAY_SIZE(page)) {
			spin_unlock_irqrestore(&cache->lock, flags);
			gnttab_free_pages(i, page);
			i = 0;
			spin_lock_irqsave(&cache->lock, flags);
		}
	}

	spin_unlock_irqrestore(&cache->lock, flags);

	if (i != 0)
		gnttab_free_pages(i, page);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_shrink);

void gnttab_pages_clear_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (PagePrivate(pages[i])) {
#if BITS_PER_LONG < 64
			kfree((void *)page_private(pages[i]));
#endif
			ClearPagePrivate(pages[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);

/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
void gnttab_free_pages(int nr_pages, struct page **pages)
{
	gnttab_pages_clear_private(nr_pages, pages);
	xen_free_unpopulated_pages(nr_pages, pages);
}
EXPORT_SYMBOL_GPL(gnttab_free_pages);

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/**
 * gnttab_dma_alloc_pages - alloc DMAable pages suitable for grant mapping into
 * @args: arguments to the function
 */
int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
{
	unsigned long pfn, start_pfn;
	size_t size;
	int i, ret;

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		args->vaddr = dma_alloc_coherent(args->dev, size,
						 &args->dev_bus_addr,
						 GFP_KERNEL | __GFP_NOWARN);
	else
		args->vaddr = dma_alloc_wc(args->dev, size,
					   &args->dev_bus_addr,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!args->vaddr) {
		pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
		return -ENOMEM;
	}

	start_pfn = __phys_to_pfn(args->dev_bus_addr);
	for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
	     pfn++, i++) {
		struct page *page = pfn_to_page(pfn);

		args->pages[i] = page;
		args->frames[i] = xen_page_to_gfn(page);
		xenmem_reservation_scrub_page(page);
	}

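	/*
	 * Give the frames backing this buffer back to Xen: after scrubbing,
	 * clear the kernel virtual address mappings (PV only) and decrease
	 * the memory reservation for these PFNs.  This leaves a
	 * bus-contiguous range of unbacked PFNs that can later be used as a
	 * target for grant mappings while keeping the DMA address seen by
	 * the device.
	 */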
	xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);

	ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to decrease reservation for DMA buffer\n");
		ret = -EFAULT;
		goto fail;
	}

	ret = gnttab_pages_set_private(args->nr_pages, args->pages);
	if (ret < 0)
		goto fail;

	return 0;

fail:
	gnttab_dma_free_pages(args);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);

/**
 * gnttab_dma_free_pages - free DMAable pages
 * @args: arguments to the function
 */
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
{
	size_t size;
	int i, ret;

	gnttab_pages_clear_private(args->nr_pages, args->pages);

	for (i = 0; i < args->nr_pages; i++)
		args->frames[i] = page_to_xen_pfn(args->pages[i]);

	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to increase reservation for DMA buffer\n");
		ret = -EFAULT;
	} else {
		ret = 0;
	}

	xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
					     args->frames);

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		dma_free_coherent(args->dev, size,
				  args->vaddr, args->dev_bus_addr);
	else
		dma_free_wc(args->dev, size,
			    args->vaddr, args->dev_bus_addr);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
#endif

/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
static inline void
gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
			const char *func)
{
	unsigned delay = 1;

	do {
		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
		if (*status == GNTST_eagain)
			msleep(delay++);
	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));

	if (delay >= MAX_DELAY) {
		pr_err("%s: %s eagain grant\n", func, current->comm);
		*status = GNTST_bad_page;
	}
}

void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
{
	struct gnttab_map_grant_ref *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_map);

void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
{
	struct gnttab_copy *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_copy);

void gnttab_foreach_grant_in_range(struct page *page,
				   unsigned int offset,
				   unsigned int len,
				   xen_grant_fn_t fn,
				   void *data)
{
	unsigned int goffset;
	unsigned int glen;
	unsigned long xen_pfn;

	len = min_t(unsigned int, PAGE_SIZE - offset, len);
	goffset = xen_offset_in_page(offset);

	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);

	while (len) {
		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);

		goffset = 0;
		xen_pfn++;
		len -= glen;
	}
}
EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);
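
/*
 * Illustrative sketch of a gnttab_foreach_grant_in_range() caller, useful
 * when PAGE_SIZE is larger than XEN_PAGE_SIZE.  The seg/seg_ctx types and
 * record_seg callback are assumptions made up for this example; only the
 * xen_grant_fn_t signature comes from the real interface.
 *
 *	struct seg { unsigned long gfn; unsigned int offset, len; };
 *	struct seg_ctx { struct seg *segs; unsigned int nr; };
 *
 *	static void record_seg(unsigned long gfn, unsigned int offset,
 *			       unsigned int len, void *data)
 *	{
 *		struct seg_ctx *ctx = data;
 *
 *		ctx->segs[ctx->nr].gfn = gfn;
 *		ctx->segs[ctx->nr].offset = offset;
 *		ctx->segs[ctx->nr].len = len;
 *		ctx->nr++;
 *	}
 *
 *	gnttab_foreach_grant_in_range(page, offset, len, record_seg, &ctx);
 */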

void gnttab_foreach_grant(struct page **pages,
			  unsigned int nr_grefs,
			  xen_grant_fn_t fn,
			  void *data)
{
	unsigned int goffset = 0;
	unsigned long xen_pfn = 0;
	unsigned int i;

	for (i = 0; i < nr_grefs; i++) {
		if ((i % XEN_PFN_PER_PAGE) == 0) {
			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
			goffset = 0;
		}

		fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);

		goffset += XEN_PAGE_SIZE;
		xen_pfn++;
	}
}

int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		switch (map_ops[i].status) {
		case GNTST_okay:
		{
			struct xen_page_foreign *foreign;

			SetPageForeign(pages[i]);
			foreign = xen_page_foreign(pages[i]);
			foreign->domid = map_ops[i].dom;
			foreign->gref = map_ops[i].ref;
			break;
		}

		case GNTST_no_device_space:
			pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
			break;

		case GNTST_eagain:
			/* Retry eagain maps */
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
						map_ops + i,
						&map_ops[i].status, __func__);
			/* Test status in next loop iteration. */
			i--;
			break;

		default:
			break;
		}
	}

	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);

int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count)
{
	unsigned int i;
	int ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++)
		ClearPageForeign(pages[i]);

	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);

#define GNTTAB_UNMAP_REFS_DELAY 5

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item);

static void gnttab_unmap_work(struct work_struct *work)
{
	struct gntab_unmap_queue_data
		*unmap_data = container_of(work,
					   struct gntab_unmap_queue_data,
					   gnttab_work.work);
	if (unmap_data->age != UINT_MAX)
		unmap_data->age++;
	__gnttab_unmap_refs_async(unmap_data);
}

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item)
{
	int ret;
	int pc;

	for (pc = 0; pc < item->count; pc++) {
		if (page_count(item->pages[pc]) > 1) {
			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);

			schedule_delayed_work(&item->gnttab_work,
					      msecs_to_jiffies(delay));
			return;
		}
	}

	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
				item->pages, item->count);
	item->done(ret, item);
}

void gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item)
{
	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
	item->age = 0;

	__gnttab_unmap_refs_async(item);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);

static void unmap_refs_callback(int result,
				struct gntab_unmap_queue_data *data)
{
	struct unmap_refs_callback_data *d = data->data;

	d->result = result;
	complete(&d->completion);
}

int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
{
	struct unmap_refs_callback_data data;

	init_completion(&data.completion);
	item->data = &data;
	item->done = &unmap_refs_callback;
	gnttab_unmap_refs_async(item);
	wait_for_completion(&data.completion);

	return data.result;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);

static unsigned int nr_status_frames(unsigned int nr_grant_frames)
{
	return gnttab_frames(nr_grant_frames, SPP);
}

static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
	int rc;

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}

static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
{
	uint64_t *sframes;
	unsigned int nr_sframes;
	struct gnttab_get_status_frames getframes;
	int rc;

	nr_sframes = nr_status_frames(nr_gframes);

	/* No need for kzalloc as it is initialized in following hypercall
	 * GNTTABOP_get_status_frames.
	 */
	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
	if (!sframes)
		return -ENOMEM;

	getframes.dom = DOMID_SELF;
	getframes.nr_frames = nr_sframes;
	set_xen_guest_handle(getframes.frame_list, sframes);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
				       &getframes, 1);
	if (rc == -ENOSYS) {
		kfree(sframes);
		return -ENOSYS;
	}

	BUG_ON(rc || getframes.status);

	rc = arch_gnttab_map_status(sframes, nr_sframes,
				    nr_status_frames(gnttab_max_grant_frames()),
				    &grstatus);
	BUG_ON(rc);
	kfree(sframes);

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v2(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
}

static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	xen_pfn_t *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;

		rc = 0;
		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				pr_warn("grant table add_to_physmap failed, err=%d\n",
					rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	/* No need for kzalloc as it is initialized in following hypercall
	 * GNTTABOP_setup_table.
	 */
	frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom = DOMID_SELF;
	setup.nr_frames = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

	rc = gnttab_interface->map_frames(frames, nr_gframes);

	kfree(frames);

	return rc;
}

static const struct gnttab_ops gnttab_v1_ops = {
	.version = 1,
	.grefs_per_grant_frame = XEN_PAGE_SIZE /
				 sizeof(struct grant_entry_v1),
	.map_frames = gnttab_map_frames_v1,
	.unmap_frames = gnttab_unmap_frames_v1,
	.update_entry = gnttab_update_entry_v1,
	.end_foreign_access_ref = gnttab_end_foreign_access_ref_v1,
	.end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v1,
	.read_frame = gnttab_read_frame_v1,
};

static const struct gnttab_ops gnttab_v2_ops = {
	.version = 2,
	.grefs_per_grant_frame = XEN_PAGE_SIZE /
				 sizeof(union grant_entry_v2),
	.map_frames = gnttab_map_frames_v2,
	.unmap_frames = gnttab_unmap_frames_v2,
	.update_entry = gnttab_update_entry_v2,
	.end_foreign_access_ref = gnttab_end_foreign_access_ref_v2,
	.end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2,
	.read_frame = gnttab_read_frame_v2,
};

static bool gnttab_need_v2(void)
{
#ifdef CONFIG_X86
	uint32_t base, width;

	if (xen_pv_domain()) {
		base = xen_cpuid_base();
		if (cpuid_eax(base) < 5)
			return false;	/* Information not available, use V1. */
		width = cpuid_ebx(base + 5) &
			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
		return width > 32 + PAGE_SHIFT;
	}
#endif
	return !!(max_possible_pfn >> 32);
}

static void gnttab_request_version(void)
{
	long rc;
	struct gnttab_set_version gsv;

	if (gnttab_need_v2())
		gsv.version = 2;
	else
		gsv.version = 1;

	/* Boot parameter overrides automatic selection. */
	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
		gsv.version = xen_gnttab_version;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
	if (rc == 0 && gsv.version == 2)
		gnttab_interface = &gnttab_v2_ops;
	else
		gnttab_interface = &gnttab_v1_ops;
	pr_info("Grant tables using version %d layout\n",
		gnttab_interface->version);
}

static int gnttab_setup(void)
{
	unsigned int max_nr_gframes;

	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
		if (gnttab_shared.addr == NULL) {
			pr_warn("gnttab share frames are not mapped!\n");
			return -ENOMEM;
		}
	}
	return gnttab_map(0, nr_grant_frames - 1);
}

int gnttab_resume(void)
{
	gnttab_request_version();
	return gnttab_setup();
}

int gnttab_suspend(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		gnttab_interface->unmap_frames();
	return 0;
}

static int gnttab_expand(unsigned int req_entries)
{
	int rc;
	unsigned int cur, extra;

	cur = nr_grant_frames;
	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
		 gnttab_interface->grefs_per_grant_frame);
	if (cur + extra > gnttab_max_grant_frames()) {
		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
				    " cur=%u extra=%u limit=%u"
				    " gnttab_free_count=%u req_entries=%u\n",
				    cur, extra, gnttab_max_grant_frames(),
				    gnttab_free_count, req_entries);
		return -ENOSPC;
	}

	rc = gnttab_map(cur, cur + extra - 1);
	if (rc == 0)
		rc = grow_gnttab_list(extra);

	return rc;
}

int gnttab_init(void)
{
	int i;
	unsigned long max_nr_grant_frames;
	unsigned int max_nr_glist_frames, nr_glist_frames;
	unsigned int nr_init_grefs;
	int ret;

	gnttab_request_version();
	max_nr_grant_frames = gnttab_max_grant_frames();
	nr_grant_frames = 1;

	/* Determine the maximum number of frames required for the
	 * grant reference free list on the current hypervisor.
	 */
	max_nr_glist_frames = (max_nr_grant_frames *
			       gnttab_interface->grefs_per_grant_frame / RPP);

	gnttab_list = kmalloc_array(max_nr_glist_frames,
				    sizeof(grant_ref_t *),
				    GFP_KERNEL);
	if (gnttab_list == NULL)
		return -ENOMEM;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	for (i = 0; i < nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
		if (gnttab_list[i] == NULL) {
			ret = -ENOMEM;
			goto ini_nomem;
		}
	}

	ret = arch_gnttab_init(max_nr_grant_frames,
			       nr_status_frames(max_nr_grant_frames));
	if (ret < 0)
		goto ini_nomem;

	if (gnttab_setup() < 0) {
		ret = -ENODEV;
		goto ini_nomem;
	}

	nr_init_grefs = nr_grant_frames *
			gnttab_interface->grefs_per_grant_frame;

	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
	gnttab_free_head = NR_RESERVED_ENTRIES;

	printk("Grant table initialized\n");
	return 0;

ini_nomem:
	for (i--; i >= 0; i--)
		free_page((unsigned long)gnttab_list[i]);
	kfree(gnttab_list);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_init);

static int __gnttab_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain() && !xen_pvh_domain())
		return 0;

	return gnttab_init();
}
/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
 * beforehand to initialize xen_auto_xlat_grant_frames.
 */
core_initcall_sync(__gnttab_init);