/******************************************************************************
 * grant_table.c
 *
 * Granting foreign access to our memory reservation.
 *
 * Copyright (c) 2005-2006, Christopher Clark
 * Copyright (c) 2004-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/bootmem.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/workqueue.h>
#include <linux/ratelimit.h>
#include <linux/moduleparam.h>
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
#include <linux/dma-mapping.h>
#endif

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
#include <xen/balloon.h>
#ifdef CONFIG_X86
#include <asm/xen/cpuid.h>
#endif
#include <xen/mem-reservation.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

#include <asm/pgtable.h>
#include <asm/sync_bitops.h>

/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
#define GNTTAB_LIST_END 0xffffffff

static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
static int gnttab_free_count;
static grant_ref_t gnttab_free_head;
static DEFINE_SPINLOCK(gnttab_list_lock);
struct grant_frames xen_auto_xlat_grant_frames;
static unsigned int xen_gnttab_version;
module_param_named(version, xen_gnttab_version, uint, 0);

static union {
	struct grant_entry_v1 *v1;
	union grant_entry_v2 *v2;
	void *addr;
} gnttab_shared;

/* This is a structure of function pointers for the grant table. */
struct gnttab_ops {
	/*
	 * Version of the grant interface.
	 */
	unsigned int version;
	/*
	 * Number of grant refs per grant frame.
	 */
	unsigned int grefs_per_grant_frame;
	/*
	 * Map a list of frames for storing grant entries. The frames
	 * parameter stores the grant table addresses when the grant table is
	 * being set up, and nr_gframes is the number of frames to map.
	 * Returning GNTST_okay means success; a negative value means failure.
	 */
	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
	/*
	 * Release the list of frames that were mapped in map_frames for
	 * grant entry status.
	 */
	void (*unmap_frames)(void);
	/*
	 * Introduce a valid entry into the grant table, granting the frame of
	 * this grant entry to a domain for access or transfer. The ref
	 * parameter is the reference of the introduced grant entry, domid is
	 * the id of the granted domain, frame is the page frame to be
	 * granted, and flags is the status to set on the grant entry.
	 */
	void (*update_entry)(grant_ref_t ref, domid_t domid,
			     unsigned long frame, unsigned flags);
	/*
	 * Stop granting a grant entry to a domain for access. The ref
	 * parameter is the reference of the grant entry whose access will be
	 * stopped; the readonly parameter is unused. If the grant entry is
	 * currently mapped for reading or writing, return failure (== 0)
	 * without tearing down the grant access. Otherwise, stop granting
	 * access for this entry and return success (== 1).
	 */
	int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
	/*
	 * Stop granting a grant entry to a domain for transfer. The ref
	 * parameter is the reference of the grant entry whose transfer will
	 * be stopped. If the transfer has not started, just reclaim the grant
	 * entry and return failure (== 0). Otherwise, wait for the transfer
	 * to complete and then return the frame.
	 */
	unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
	/*
	 * Query the status of a grant entry. The ref parameter is the
	 * reference of the queried grant entry; the return value is the
	 * status of that entry. The detailed status (writing/reading) can be
	 * obtained from the return value by bit operations.
	 */
	int (*query_foreign_access)(grant_ref_t ref);
};

struct unmap_refs_callback_data {
	struct completion completion;
	int result;
};

static const struct gnttab_ops *gnttab_interface;

/* This reflects the status of grant entries, so it acts as a global value. */
static grant_status_t *grstatus;

static struct gnttab_free_callback *gnttab_free_callback_list;

static int gnttab_expand(unsigned int req_entries);

#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
#define SPP (PAGE_SIZE / sizeof(grant_status_t))

static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
	return &gnttab_list[(entry) / RPP][(entry) % RPP];
}
/* This can be used as an l-value */
#define gnttab_entry(entry) (*__gnttab_entry(entry))
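
/*
 * Worked example (illustrative only, assuming 4 KiB pages and a 4-byte
 * grant_ref_t, so RPP == 1024): reference 2500 lives in the third page of
 * gnttab_list, i.e. gnttab_entry(2500) resolves to gnttab_list[2][452],
 * since 2500 / 1024 == 2 and 2500 % 1024 == 452.
 */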

static int get_free_entries(unsigned count)
{
	unsigned long flags;
	int ref, rc = 0;
	grant_ref_t head;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if ((gnttab_free_count < count) &&
	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		return rc;
	}

	ref = head = gnttab_free_head;
	gnttab_free_count -= count;
	while (count-- > 1)
		head = gnttab_entry(head);
	gnttab_free_head = gnttab_entry(head);
	gnttab_entry(head) = GNTTAB_LIST_END;

	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ref;
}

static void do_free_callbacks(void)
{
	struct gnttab_free_callback *callback, *next;

	callback = gnttab_free_callback_list;
	gnttab_free_callback_list = NULL;

	while (callback != NULL) {
		next = callback->next;
		if (gnttab_free_count >= callback->count) {
			callback->next = NULL;
			callback->fn(callback->arg);
		} else {
			callback->next = gnttab_free_callback_list;
			gnttab_free_callback_list = callback;
		}
		callback = next;
	}
}

static inline void check_free_callbacks(void)
{
	if (unlikely(gnttab_free_callback_list))
		do_free_callbacks();
}

static void put_free_entry(grant_ref_t ref)
{
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = ref;
	gnttab_free_count++;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

/*
 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
 * Introducing a valid entry into the grant table:
 *  1. Write ent->domid.
 *  2. Write ent->frame:
 *      GTF_permit_access:   Frame to which access is permitted.
 *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
 *                           frame, or zero if none.
 *  3. Write memory barrier (WMB).
 *  4. Write ent->flags, inc. valid type.
 */
static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v1[ref].domid = domid;
	gnttab_shared.v1[ref].frame = frame;
	wmb();
	gnttab_shared.v1[ref].flags = flags;
}

static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned int flags)
{
	gnttab_shared.v2[ref].hdr.domid = domid;
	gnttab_shared.v2[ref].full_page.frame = frame;
	wmb();	/* Hypervisor concurrent accesses. */
	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
}

/*
 * Public grant-issuing interface functions
 */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly)
{
	gnttab_interface->update_entry(ref, domid, frame,
			   GTF_permit_access | (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;

	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
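
/*
 * Illustrative sketch (not built, not part of this driver): a typical
 * frontend grants one of its pages to its backend. The backend domain id
 * would normally be read from xenstore; "backend_domid" and "shared_page"
 * here are hypothetical. The returned reference is what gets advertised to
 * the backend. Teardown via gnttab_end_foreign_access() is shown further
 * below.
 */
#if 0
static int example_grant_page(domid_t backend_domid, struct page *shared_page)
{
	/* Grant read/write access (readonly == 0) to the page's frame. */
	int ref = gnttab_grant_foreign_access(backend_domid,
					      xen_page_to_gfn(shared_page), 0);

	if (ref < 0)
		pr_err("no free grant references\n");
	return ref;
}
#endif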

static int gnttab_query_foreign_access_v1(grant_ref_t ref)
{
	return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
}

static int gnttab_query_foreign_access_v2(grant_ref_t ref)
{
	return grstatus[ref] & (GTF_reading|GTF_writing);
}

int gnttab_query_foreign_access(grant_ref_t ref)
{
	return gnttab_interface->query_foreign_access(ref);
}
EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);

static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
	u16 flags, nflags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;
	nflags = *pflags;
	do {
		flags = nflags;
		if (flags & (GTF_reading|GTF_writing))
			return 0;
	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);

	return 1;
}

static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
{
	gnttab_shared.v2[ref].hdr.flags = 0;
	mb();	/* Concurrent access by hypervisor. */
	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
		return 0;
	} else {
		/*
		 * The read of grstatus needs to have acquire semantics.
		 * On x86, reads already have that, and we just need to
		 * protect against compiler reorderings.
		 * On other architectures we may need a full barrier.
		 */
#ifdef CONFIG_X86
		barrier();
#else
		mb();
#endif
	}

	return 1;
}

static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	return gnttab_interface->end_foreign_access_ref(ref, readonly);
}

int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	if (_gnttab_end_foreign_access_ref(ref, readonly))
		return 1;
	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);

struct deferred_entry {
	struct list_head list;
	grant_ref_t ref;
	bool ro;
	uint16_t warn_delay;
	struct page *page;
};
static LIST_HEAD(deferred_list);
static void gnttab_handle_deferred(struct timer_list *);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);

static void gnttab_handle_deferred(struct timer_list *unused)
{
	unsigned int nr = 10;
	struct deferred_entry *first = NULL;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (nr--) {
		struct deferred_entry *entry
			= list_first_entry(&deferred_list,
					   struct deferred_entry, list);

		if (entry == first)
			break;
		list_del(&entry->list);
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
			put_free_entry(entry->ref);
			if (entry->page) {
				pr_debug("freeing g.e. %#x (pfn %#lx)\n",
					 entry->ref, page_to_pfn(entry->page));
				put_page(entry->page);
			} else
				pr_info("freeing g.e. %#x\n", entry->ref);
			kfree(entry);
			entry = NULL;
		} else {
			if (!--entry->warn_delay)
				pr_info("g.e. %#x still pending\n", entry->ref);
			if (!first)
				first = entry;
		}
		spin_lock_irqsave(&gnttab_list_lock, flags);
		if (entry)
			list_add_tail(&entry->list, &deferred_list);
		else if (list_empty(&deferred_list))
			break;
	}
	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
		deferred_timer.expires = jiffies + HZ;
		add_timer(&deferred_timer);
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
				struct page *page)
{
	struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	const char *what = KERN_WARNING "leaking";

	if (entry) {
		unsigned long flags;

		entry->ref = ref;
		entry->ro = readonly;
		entry->page = page;
		entry->warn_delay = 60;
		spin_lock_irqsave(&gnttab_list_lock, flags);
		list_add_tail(&entry->list, &deferred_list);
		if (!timer_pending(&deferred_timer)) {
			deferred_timer.expires = jiffies + HZ;
			add_timer(&deferred_timer);
		}
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		what = KERN_DEBUG "deferring";
	}
	printk("%s g.e. %#x (pfn %#lx)\n",
	       what, ref, page ? page_to_pfn(page) : -1);
}

void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
			       unsigned long page)
{
	if (gnttab_end_foreign_access_ref(ref, readonly)) {
		put_free_entry(ref);
		if (page != 0)
			put_page(virt_to_page(page));
	} else
		gnttab_add_deferred(ref, readonly,
				    page ? virt_to_page(page) : NULL);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
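
/*
 * Illustrative sketch (not built, continuing the hypothetical example
 * above): revoking the grant when I/O has finished. Passing the page's
 * virtual address lets the core defer freeing it if the backend still has
 * the grant mapped, rather than reusing a page the backend can still write
 * to. The page is assumed to be a lowmem (e.g. kmalloc'd) page so that
 * page_address() is valid; ownership of one page reference transfers to
 * the grant-table core.
 */
#if 0
static void example_ungrant_page(grant_ref_t ref, struct page *shared_page)
{
	gnttab_end_foreign_access(ref, 0 /* readonly */,
				  (unsigned long)page_address(shared_page));
}
#endif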

int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;
	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);

void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
				       unsigned long pfn)
{
	gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);

static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
{
	unsigned long frame;
	u16 flags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;

	/*
	 * If a transfer has not yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v1[ref].frame;
	BUG_ON(frame == 0);

	return frame;
}

static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
{
	unsigned long frame;
	u16 flags;
	u16 *pflags;

	pflags = &gnttab_shared.v2[ref].hdr.flags;

	/*
	 * If a transfer has not yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v2[ref].full_page.frame;
	BUG_ON(frame == 0);

	return frame;
}

unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
	return gnttab_interface->end_foreign_transfer_ref(ref);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);

unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
{
	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);

	put_free_entry(ref);
	return frame;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
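
/*
 * Illustrative sketch (not built): accepting a page transfer from another
 * domain. The local pfn is the pseudo-physical slot the transferred frame
 * will fill; "peer_domid" and "slot_pfn" are hypothetical. A real user
 * would advertise the reference to the peer and only end the transfer once
 * notified that the peer has committed it.
 */
#if 0
static unsigned long example_accept_transfer(domid_t peer_domid,
					     unsigned long slot_pfn)
{
	int ref = gnttab_grant_foreign_transfer(peer_domid, slot_pfn);

	if (ref < 0)
		return 0;
	/* ... tell the peer about "ref" and wait for its notification ... */
	return gnttab_end_foreign_transfer(ref);	/* 0 if never started */
}
#endif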

void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);

void gnttab_free_grant_references(grant_ref_t head)
{
	grant_ref_t ref;
	unsigned long flags;
	int count = 1;

	if (head == GNTTAB_LIST_END)
		return;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	ref = head;
	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
		ref = gnttab_entry(ref);
		count++;
	}
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = head;
	gnttab_free_count += count;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);

int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
{
	int h = get_free_entries(count);

	if (h < 0)
		return -ENOSPC;

	*head = h;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);

int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
	return (*private_head == GNTTAB_LIST_END);
}
EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);

int gnttab_claim_grant_reference(grant_ref_t *private_head)
{
	grant_ref_t g = *private_head;

	if (unlikely(g == GNTTAB_LIST_END))
		return -ENOSPC;
	*private_head = gnttab_entry(g);
	return g;
}
EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);

void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release)
{
	gnttab_entry(release) = *private_head;
	*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
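
/*
 * Illustrative sketch (not built): pre-allocating a private pool of
 * references so that a hot path never has to take gnttab_list_lock or
 * expand the table. This is the general pattern ring-based frontends use;
 * "RING_SIZE" is a hypothetical constant. Leftover references could be
 * returned to the pool with gnttab_release_grant_reference() and the whole
 * pool released with gnttab_free_grant_references().
 */
#if 0
static int example_fill_ring(domid_t backend_domid, struct page **pages)
{
	grant_ref_t pool;
	unsigned int i;

	if (gnttab_alloc_grant_references(RING_SIZE, &pool) < 0)
		return -ENOSPC;

	for (i = 0; i < RING_SIZE; i++) {
		int ref = gnttab_claim_grant_reference(&pool);

		BUG_ON(ref < 0);	/* pool was sized for RING_SIZE refs */
		gnttab_grant_foreign_access_ref(ref, backend_domid,
						xen_page_to_gfn(pages[i]), 0);
	}
	return 0;
}
#endif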

void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count)
{
	unsigned long flags;
	struct gnttab_free_callback *cb;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	/* Check if the callback is already on the list */
	cb = gnttab_free_callback_list;
	while (cb) {
		if (cb == callback)
			goto out;
		cb = cb->next;
	}

	callback->fn = fn;
	callback->arg = arg;
	callback->count = count;
	callback->next = gnttab_free_callback_list;
	gnttab_free_callback_list = callback;
	check_free_callbacks();
out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);

void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
{
	struct gnttab_free_callback **pcb;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
		if (*pcb == callback) {
			*pcb = callback->next;
			break;
		}
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
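
/*
 * Illustrative sketch (not built): when gnttab_alloc_grant_references()
 * fails with -ENOSPC, a driver can ask to be called back once enough
 * references have been freed, instead of polling. The gnttab_free_callback
 * object must stay alive until the callback fires or is cancelled. Note
 * that the callback runs under gnttab_list_lock, so it should only kick a
 * worker. "struct example_dev" and "example_retry_tx" are hypothetical.
 */
#if 0
struct example_dev {
	struct gnttab_free_callback gref_cb;
	/* ... driver state ... */
};

static void example_retry_tx(void *arg)
{
	struct example_dev *dev = arg;

	/* Atomic context: defer real work, e.g. schedule_work(). */
}

static void example_wait_for_grefs(struct example_dev *dev, u16 needed)
{
	gnttab_request_free_callback(&dev->gref_cb, example_retry_tx,
				     dev, needed);
}
#endif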

static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
{
	return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
	       align;
}

static int grow_gnttab_list(unsigned int more_frames)
{
	unsigned int new_nr_grant_frames, extra_entries, i;
	unsigned int nr_glist_frames, new_nr_glist_frames;
	unsigned int grefs_per_frame;

	BUG_ON(gnttab_interface == NULL);
	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;

	new_nr_grant_frames = nr_grant_frames + more_frames;
	extra_entries = more_frames * grefs_per_frame;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
		if (!gnttab_list[i])
			goto grow_nomem;
	}

	for (i = grefs_per_frame * nr_grant_frames;
	     i < grefs_per_frame * new_nr_grant_frames - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(i) = gnttab_free_head;
	gnttab_free_head = grefs_per_frame * nr_grant_frames;
	gnttab_free_count += extra_entries;

	nr_grant_frames = new_nr_grant_frames;

	check_free_callbacks();

	return 0;

grow_nomem:
	while (i-- > nr_glist_frames)
		free_page((unsigned long) gnttab_list[i]);
	return -ENOMEM;
}

static unsigned int __max_nr_grant_frames(void)
{
	struct gnttab_query_size query;
	int rc;

	query.dom = DOMID_SELF;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
	if ((rc < 0) || (query.status != GNTST_okay))
		return 4; /* Legacy max supported number of frames */

	return query.max_nr_frames;
}

unsigned int gnttab_max_grant_frames(void)
{
	unsigned int xen_max = __max_nr_grant_frames();
	static unsigned int boot_max_nr_grant_frames;

	/* First time, initialize it properly. */
	if (!boot_max_nr_grant_frames)
		boot_max_nr_grant_frames = __max_nr_grant_frames();

	if (xen_max > boot_max_nr_grant_frames)
		return boot_max_nr_grant_frames;
	return xen_max;
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);

int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
{
	xen_pfn_t *pfn;
	unsigned int max_nr_gframes = __max_nr_grant_frames();
	unsigned int i;
	void *vaddr;

	if (xen_auto_xlat_grant_frames.count)
		return -EINVAL;

	vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
	if (vaddr == NULL) {
		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
			&addr);
		return -ENOMEM;
	}
	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
	if (!pfn) {
		xen_unmap(vaddr);
		return -ENOMEM;
	}
	for (i = 0; i < max_nr_gframes; i++)
		pfn[i] = XEN_PFN_DOWN(addr) + i;

	xen_auto_xlat_grant_frames.vaddr = vaddr;
	xen_auto_xlat_grant_frames.pfn = pfn;
	xen_auto_xlat_grant_frames.count = max_nr_gframes;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);

void gnttab_free_auto_xlat_frames(void)
{
	if (!xen_auto_xlat_grant_frames.count)
		return;
	kfree(xen_auto_xlat_grant_frames.pfn);
	xen_unmap(xen_auto_xlat_grant_frames.vaddr);

	xen_auto_xlat_grant_frames.pfn = NULL;
	xen_auto_xlat_grant_frames.count = 0;
	xen_auto_xlat_grant_frames.vaddr = NULL;
}
EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);

int gnttab_pages_set_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
#if BITS_PER_LONG < 64
		struct xen_page_foreign *foreign;

		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
		if (!foreign)
			return -ENOMEM;

		set_page_private(pages[i], (unsigned long)foreign);
#endif
		SetPagePrivate(pages[i]);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_pages_set_private);

/**
 * gnttab_alloc_pages - allocate pages suitable for grant mapping
 * @nr_pages: number of pages to alloc
 * @pages: returns the pages
 */
int gnttab_alloc_pages(int nr_pages, struct page **pages)
{
	int ret;

	ret = alloc_xenballooned_pages(nr_pages, pages);
	if (ret < 0)
		return ret;

	ret = gnttab_pages_set_private(nr_pages, pages);
	if (ret < 0)
		gnttab_free_pages(nr_pages, pages);

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_pages);

void gnttab_pages_clear_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (PagePrivate(pages[i])) {
#if BITS_PER_LONG < 64
			kfree((void *)page_private(pages[i]));
#endif
			ClearPagePrivate(pages[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);

/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
void gnttab_free_pages(int nr_pages, struct page **pages)
{
	gnttab_pages_clear_private(nr_pages, pages);
	free_xenballooned_pages(nr_pages, pages);
}
EXPORT_SYMBOL_GPL(gnttab_free_pages);
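
/*
 * Illustrative sketch (not built): a backend that wants to map a
 * frontend's grants first allocates ballooned pages (backed by physical
 * memory but with no guest frames of their own) to map them into.
 * "NR_BUF_PAGES" is hypothetical. The actual mapping is done with
 * gnttab_map_refs(), defined later in this file.
 */
#if 0
static struct page *example_bufs[NR_BUF_PAGES];

static int example_get_map_area(void)
{
	int ret = gnttab_alloc_pages(NR_BUF_PAGES, example_bufs);

	if (ret < 0)
		return ret;
	/* ... fill in gnttab_map_grant_ref ops, call gnttab_map_refs() ... */
	return 0;
}

static void example_put_map_area(void)
{
	gnttab_free_pages(NR_BUF_PAGES, example_bufs);
}
#endif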

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/**
 * gnttab_dma_alloc_pages - allocate DMAable pages suitable for grant mapping
 * @args: arguments to the function
 */
int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
{
	unsigned long pfn, start_pfn;
	size_t size;
	int i, ret;

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		args->vaddr = dma_alloc_coherent(args->dev, size,
						 &args->dev_bus_addr,
						 GFP_KERNEL | __GFP_NOWARN);
	else
		args->vaddr = dma_alloc_wc(args->dev, size,
					   &args->dev_bus_addr,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!args->vaddr) {
		pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
		return -ENOMEM;
	}

	start_pfn = __phys_to_pfn(args->dev_bus_addr);
	for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
			pfn++, i++) {
		struct page *page = pfn_to_page(pfn);

		args->pages[i] = page;
		args->frames[i] = xen_page_to_gfn(page);
		xenmem_reservation_scrub_page(page);
	}

	xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);

	ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to decrease reservation for DMA buffer\n");
		ret = -EFAULT;
		goto fail;
	}

	ret = gnttab_pages_set_private(args->nr_pages, args->pages);
	if (ret < 0)
		goto fail;

	return 0;

fail:
	gnttab_dma_free_pages(args);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);

/**
 * gnttab_dma_free_pages - free DMAable pages
 * @args: arguments to the function
 */
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
{
	size_t size;
	int i, ret;

	gnttab_pages_clear_private(args->nr_pages, args->pages);

	for (i = 0; i < args->nr_pages; i++)
		args->frames[i] = page_to_xen_pfn(args->pages[i]);

	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to increase reservation for DMA buffer\n");
		ret = -EFAULT;
	} else {
		ret = 0;
	}

	xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
					     args->frames);

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		dma_free_coherent(args->dev, size,
				  args->vaddr, args->dev_bus_addr);
	else
		dma_free_wc(args->dev, size,
			    args->vaddr, args->dev_bus_addr);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
#endif
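
/*
 * Illustrative sketch (not built, only meaningful with
 * CONFIG_XEN_GRANT_DMA_ALLOC): filling in struct gnttab_dma_alloc_args for
 * a contiguous, DMAable grant buffer. "dev" is the device doing DMA; the
 * pages/frames arrays must have room for nr_pages entries. The values here
 * are hypothetical.
 */
#if 0
static int example_dma_buf(struct device *dev, struct page **pages,
			   xen_pfn_t *frames)
{
	struct gnttab_dma_alloc_args args = {
		.dev		= dev,
		.coherent	= true,	/* dma_alloc_coherent() vs dma_alloc_wc() */
		.nr_pages	= 16,
		.pages		= pages,
		.frames		= frames,
	};
	int ret = gnttab_dma_alloc_pages(&args);

	if (ret < 0)
		return ret;
	/* ... grant args.frames[] to the other domain, do the I/O ... */
	return gnttab_dma_free_pages(&args);
}
#endif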

/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
static inline void
gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
			const char *func)
{
	unsigned delay = 1;

	do {
		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
		if (*status == GNTST_eagain)
			msleep(delay++);
	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));

	if (delay >= MAX_DELAY) {
		pr_err("%s: %s eagain grant\n", func, current->comm);
		*status = GNTST_bad_page;
	}
}

void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
{
	struct gnttab_map_grant_ref *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_map);

void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
{
	struct gnttab_copy *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_copy);
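
/*
 * Illustrative sketch (not built): copying data out of a granted page with
 * a single GNTTABOP_copy batch entry, letting gnttab_batch_copy() absorb
 * transient GNTST_eagain results. "src_ref", "src_domid" and "dst_page"
 * are hypothetical; a real batch would usually contain many ops.
 */
#if 0
static int example_copy_from_grant(grant_ref_t src_ref, domid_t src_domid,
				   struct page *dst_page, uint16_t len)
{
	struct gnttab_copy op = {
		.flags		= GNTCOPY_source_gref,
		.len		= len,
		.source.u.ref	= src_ref,
		.source.domid	= src_domid,
		.dest.u.gmfn	= xen_page_to_gfn(dst_page),
		.dest.domid	= DOMID_SELF,
	};

	gnttab_batch_copy(&op, 1);
	return op.status == GNTST_okay ? 0 : -EIO;
}
#endif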

void gnttab_foreach_grant_in_range(struct page *page,
				   unsigned int offset,
				   unsigned int len,
				   xen_grant_fn_t fn,
				   void *data)
{
	unsigned int goffset;
	unsigned int glen;
	unsigned long xen_pfn;

	len = min_t(unsigned int, PAGE_SIZE - offset, len);
	goffset = xen_offset_in_page(offset);

	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);

	while (len) {
		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);

		goffset = 0;
		xen_pfn++;
		len -= glen;
	}
}
EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);
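
/*
 * Illustrative sketch (not built): when PAGE_SIZE > XEN_PAGE_SIZE (e.g.
 * 64 KiB kernel pages on arm64), one kernel page spans several Xen-sized
 * grants. This hypothetical helper counts how many grants a byte range
 * needs by iterating with gnttab_foreach_grant_in_range().
 */
#if 0
static void example_count_one(unsigned long gfn, unsigned int offset,
			      unsigned int len, void *data)
{
	(*(unsigned int *)data)++;
}

static unsigned int example_grants_needed(struct page *page,
					  unsigned int offset,
					  unsigned int len)
{
	unsigned int count = 0;

	gnttab_foreach_grant_in_range(page, offset, len,
				      example_count_one, &count);
	return count;
}
#endif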

void gnttab_foreach_grant(struct page **pages,
			  unsigned int nr_grefs,
			  xen_grant_fn_t fn,
			  void *data)
{
	unsigned int goffset = 0;
	unsigned long xen_pfn = 0;
	unsigned int i;

	for (i = 0; i < nr_grefs; i++) {
		if ((i % XEN_PFN_PER_PAGE) == 0) {
			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
			goffset = 0;
		}

		fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);

		goffset += XEN_PAGE_SIZE;
		xen_pfn++;
	}
}

int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		/* Retry eagain maps */
		if (map_ops[i].status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
						map_ops + i,
						&map_ops[i].status, __func__);

		if (map_ops[i].status == GNTST_okay) {
			struct xen_page_foreign *foreign;

			SetPageForeign(pages[i]);
			foreign = xen_page_foreign(pages[i]);
			foreign->domid = map_ops[i].dom;
			foreign->gref = map_ops[i].ref;
		}
	}

	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);

int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count)
{
	unsigned int i;
	int ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++)
		ClearPageForeign(pages[i]);

	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);

#define GNTTAB_UNMAP_REFS_DELAY 5

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item);

static void gnttab_unmap_work(struct work_struct *work)
{
	struct gntab_unmap_queue_data
		*unmap_data = container_of(work,
					   struct gntab_unmap_queue_data,
					   gnttab_work.work);
	if (unmap_data->age != UINT_MAX)
		unmap_data->age++;
	__gnttab_unmap_refs_async(unmap_data);
}

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item)
{
	int ret;
	int pc;

	for (pc = 0; pc < item->count; pc++) {
		if (page_count(item->pages[pc]) > 1) {
			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);

			schedule_delayed_work(&item->gnttab_work,
					      msecs_to_jiffies(delay));
			return;
		}
	}

	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
				item->pages, item->count);
	item->done(ret, item);
}

void gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item)
{
	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
	item->age = 0;

	__gnttab_unmap_refs_async(item);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);

static void unmap_refs_callback(int result,
				struct gntab_unmap_queue_data *data)
{
	struct unmap_refs_callback_data *d = data->data;

	d->result = result;
	complete(&d->completion);
}

int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
{
	struct unmap_refs_callback_data data;

	init_completion(&data.completion);
	item->data = &data;
	item->done = &unmap_refs_callback;
	gnttab_unmap_refs_async(item);
	wait_for_completion(&data.completion);

	return data.result;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
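
/*
 * Illustrative sketch (not built): tearing down mappings with the deferred
 * machinery above. gnttab_unmap_refs_sync() keeps retrying while any of
 * the pages still holds extra references (e.g. from in-flight I/O) and
 * only returns once the hypervisor unmap has actually run. The
 * unmap_ops/pages arrays are assumed to have been filled in when the
 * grants were mapped.
 */
#if 0
static int example_unmap(struct gnttab_unmap_grant_ref *unmap_ops,
			 struct page **pages, unsigned int count)
{
	struct gntab_unmap_queue_data unmap_data = {
		.unmap_ops	= unmap_ops,
		.kunmap_ops	= NULL,
		.pages		= pages,
		.count		= count,
	};

	return gnttab_unmap_refs_sync(&unmap_data);
}
#endif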

static unsigned int nr_status_frames(unsigned int nr_grant_frames)
{
	BUG_ON(gnttab_interface == NULL);
	return gnttab_frames(nr_grant_frames, SPP);
}

static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
	int rc;

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}

static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
{
	uint64_t *sframes;
	unsigned int nr_sframes;
	struct gnttab_get_status_frames getframes;
	int rc;

	nr_sframes = nr_status_frames(nr_gframes);

	/* No need for kzalloc as it is initialized in the following hypercall
	 * GNTTABOP_get_status_frames.
	 */
	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
	if (!sframes)
		return -ENOMEM;

	getframes.dom = DOMID_SELF;
	getframes.nr_frames = nr_sframes;
	set_xen_guest_handle(getframes.frame_list, sframes);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
				       &getframes, 1);
	if (rc == -ENOSYS) {
		kfree(sframes);
		return -ENOSYS;
	}

	BUG_ON(rc || getframes.status);

	rc = arch_gnttab_map_status(sframes, nr_sframes,
				    nr_status_frames(gnttab_max_grant_frames()),
				    &grstatus);
	BUG_ON(rc);
	kfree(sframes);

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v2(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
}

static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	xen_pfn_t *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;

		rc = 0;
		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				pr_warn("grant table add_to_physmap failed, err=%d\n",
					rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	/* No need for kzalloc as it is initialized in the following hypercall
	 * GNTTABOP_setup_table.
	 */
	frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom = DOMID_SELF;
	setup.nr_frames = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

	rc = gnttab_interface->map_frames(frames, nr_gframes);

	kfree(frames);

	return rc;
}

static const struct gnttab_ops gnttab_v1_ops = {
	.version			= 1,
	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
					  sizeof(struct grant_entry_v1),
	.map_frames			= gnttab_map_frames_v1,
	.unmap_frames			= gnttab_unmap_frames_v1,
	.update_entry			= gnttab_update_entry_v1,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v1,
	.query_foreign_access		= gnttab_query_foreign_access_v1,
};

static const struct gnttab_ops gnttab_v2_ops = {
	.version			= 2,
	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
					  sizeof(union grant_entry_v2),
	.map_frames			= gnttab_map_frames_v2,
	.unmap_frames			= gnttab_unmap_frames_v2,
	.update_entry			= gnttab_update_entry_v2,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v2,
	.query_foreign_access		= gnttab_query_foreign_access_v2,
};

static bool gnttab_need_v2(void)
{
#ifdef CONFIG_X86
	uint32_t base, width;

	if (xen_pv_domain()) {
		base = xen_cpuid_base();
		if (cpuid_eax(base) < 5)
			return false;	/* Information not available, use V1. */
		width = cpuid_ebx(base + 5) &
			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
		return width > 32 + PAGE_SHIFT;
	}
#endif
	return !!(max_possible_pfn >> 32);
}

static void gnttab_request_version(void)
{
	long rc;
	struct gnttab_set_version gsv;

	if (gnttab_need_v2())
		gsv.version = 2;
	else
		gsv.version = 1;

	/* Boot parameter overrides automatic selection. */
	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
		gsv.version = xen_gnttab_version;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
	if (rc == 0 && gsv.version == 2)
		gnttab_interface = &gnttab_v2_ops;
	else
		gnttab_interface = &gnttab_v1_ops;
	pr_info("Grant tables using version %d layout\n",
		gnttab_interface->version);
}

static int gnttab_setup(void)
{
	unsigned int max_nr_gframes;

	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
		if (gnttab_shared.addr == NULL) {
			pr_warn("gnttab share frames (addr=0x%08lx) is not mapped!\n",
				(unsigned long)xen_auto_xlat_grant_frames.vaddr);
			return -ENOMEM;
		}
	}
	return gnttab_map(0, nr_grant_frames - 1);
}

int gnttab_resume(void)
{
	gnttab_request_version();
	return gnttab_setup();
}

int gnttab_suspend(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		gnttab_interface->unmap_frames();
	return 0;
}

static int gnttab_expand(unsigned int req_entries)
{
	int rc;
	unsigned int cur, extra;

	BUG_ON(gnttab_interface == NULL);
	cur = nr_grant_frames;
	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
		 gnttab_interface->grefs_per_grant_frame);
	if (cur + extra > gnttab_max_grant_frames()) {
		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
				    " cur=%u extra=%u limit=%u"
				    " gnttab_free_count=%u req_entries=%u\n",
				    cur, extra, gnttab_max_grant_frames(),
				    gnttab_free_count, req_entries);
		return -ENOSPC;
	}

	rc = gnttab_map(cur, cur + extra - 1);
	if (rc == 0)
		rc = grow_gnttab_list(extra);

	return rc;
}

int gnttab_init(void)
{
	int i;
	unsigned long max_nr_grant_frames;
	unsigned int max_nr_glist_frames, nr_glist_frames;
	unsigned int nr_init_grefs;
	int ret;

	gnttab_request_version();
	max_nr_grant_frames = gnttab_max_grant_frames();
	nr_grant_frames = 1;

	/* Determine the maximum number of frames required for the
	 * grant reference free list on the current hypervisor.
	 */
	BUG_ON(gnttab_interface == NULL);
	max_nr_glist_frames = (max_nr_grant_frames *
			       gnttab_interface->grefs_per_grant_frame / RPP);

	gnttab_list = kmalloc_array(max_nr_glist_frames,
				    sizeof(grant_ref_t *),
				    GFP_KERNEL);
	if (gnttab_list == NULL)
		return -ENOMEM;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	for (i = 0; i < nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
		if (gnttab_list[i] == NULL) {
			ret = -ENOMEM;
			goto ini_nomem;
		}
	}

	ret = arch_gnttab_init(max_nr_grant_frames,
			       nr_status_frames(max_nr_grant_frames));
	if (ret < 0)
		goto ini_nomem;

	if (gnttab_setup() < 0) {
		ret = -ENODEV;
		goto ini_nomem;
	}

	nr_init_grefs = nr_grant_frames *
			gnttab_interface->grefs_per_grant_frame;

	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
	gnttab_free_head = NR_RESERVED_ENTRIES;

	printk("Grant table initialized\n");
	return 0;

ini_nomem:
	for (i--; i >= 0; i--)
		free_page((unsigned long)gnttab_list[i]);
	kfree(gnttab_list);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_init);

static int __gnttab_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain() && !xen_pvh_domain())
		return 0;

	return gnttab_init();
}
/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
 * beforehand to initialize xen_auto_xlat_grant_frames.
 */
core_initcall_sync(__gnttab_init);