/******************************************************************************
 * grant_table.c
 *
 * Granting foreign access to our memory reservation.
 *
 * Copyright (c) 2005-2006, Christopher Clark
 * Copyright (c) 2004-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/workqueue.h>
#include <linux/ratelimit.h>
#include <linux/moduleparam.h>
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
#include <linux/dma-mapping.h>
#endif

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
#include <xen/balloon.h>
#ifdef CONFIG_X86
#include <asm/xen/cpuid.h>
#endif
#include <xen/mem-reservation.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

#include <asm/sync_bitops.h>

/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
#define GNTTAB_LIST_END 0xffffffff

static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
static int gnttab_free_count;
static grant_ref_t gnttab_free_head;
static DEFINE_SPINLOCK(gnttab_list_lock);
struct grant_frames xen_auto_xlat_grant_frames;
static unsigned int xen_gnttab_version;
module_param_named(version, xen_gnttab_version, uint, 0);

static union {
	struct grant_entry_v1 *v1;
	union grant_entry_v2 *v2;
	void *addr;
} gnttab_shared;

/* This structure holds the function pointers for a grant table version. */
struct gnttab_ops {
	/*
	 * Version of the grant interface.
	 */
	unsigned int version;
	/*
	 * Grant refs per grant frame.
	 */
	unsigned int grefs_per_grant_frame;
	/*
	 * Map a list of frames for storing grant entries. The frames
	 * parameter holds the grant table frame addresses while the grant
	 * table is being set up, and nr_gframes is the number of frames to
	 * map. Returns GNTST_okay on success and a negative value on
	 * failure.
	 */
	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
	/*
	 * Release the list of frames that was mapped in map_frames for the
	 * grant entry status.
	 */
	void (*unmap_frames)(void);
	/*
	 * Introduce a valid entry into the grant table, granting the frame
	 * of this grant entry to a domain for access or transfer. The ref
	 * parameter is the reference of the introduced grant entry, domid is
	 * the id of the granted domain, frame is the page frame to be
	 * granted, and flags is the status of the grant entry to be updated.
	 */
	void (*update_entry)(grant_ref_t ref, domid_t domid,
			     unsigned long frame, unsigned flags);
	/*
	 * Stop granting a grant entry to a domain for access. The ref
	 * parameter is the reference of the grant entry whose access will be
	 * stopped; readonly is not used in this function. If the grant entry
	 * is currently mapped for reading or writing, just return failure
	 * (== 0) directly and don't tear down the grant access. Otherwise,
	 * stop the grant access for this entry and return success (== 1).
	 */
	int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
	/*
	 * Stop granting a grant entry to a domain for transfer. The ref
	 * parameter is the reference of the grant entry whose transfer will
	 * be stopped. If the transfer has not yet started, just reclaim the
	 * grant entry and return failure (== 0). Otherwise, wait for the
	 * transfer to complete and then return the frame.
	 */
	unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
	/*
	 * Query the status of a grant entry. The ref parameter is the
	 * reference of the queried grant entry; the return value is the
	 * status of that entry. The detailed status (reading/writing) can be
	 * extracted from the return value with bit operations.
	 */
	int (*query_foreign_access)(grant_ref_t ref);
};

struct unmap_refs_callback_data {
	struct completion completion;
	int result;
};

static const struct gnttab_ops *gnttab_interface;

/* This reflects the status of grant entries, so it acts as a global value. */
static grant_status_t *grstatus;

static struct gnttab_free_callback *gnttab_free_callback_list;

static int gnttab_expand(unsigned int req_entries);

#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
#define SPP (PAGE_SIZE / sizeof(grant_status_t))

static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
	return &gnttab_list[(entry) / RPP][(entry) % RPP];
}
/* This can be used as an l-value */
#define gnttab_entry(entry) (*__gnttab_entry(entry))
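
/*
 * Worked example (illustrative only): the free list is kept as a two-level
 * array of pages of grant_ref_t.  With 4 KiB pages and a 4-byte grant_ref_t,
 * RPP == 1024, so gnttab_entry(2500) resolves to gnttab_list[2][452]
 * (2500 == 2 * 1024 + 452).  Each slot stores the index of the next free
 * reference, terminated by GNTTAB_LIST_END.
 */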

static int get_free_entries(unsigned count)
{
	unsigned long flags;
	int ref, rc = 0;
	grant_ref_t head;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if ((gnttab_free_count < count) &&
	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		return rc;
	}

	ref = head = gnttab_free_head;
	gnttab_free_count -= count;
	while (count-- > 1)
		head = gnttab_entry(head);
	gnttab_free_head = gnttab_entry(head);
	gnttab_entry(head) = GNTTAB_LIST_END;

	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ref;
}

static void do_free_callbacks(void)
{
	struct gnttab_free_callback *callback, *next;

	callback = gnttab_free_callback_list;
	gnttab_free_callback_list = NULL;

	while (callback != NULL) {
		next = callback->next;
		if (gnttab_free_count >= callback->count) {
			callback->next = NULL;
			callback->fn(callback->arg);
		} else {
			callback->next = gnttab_free_callback_list;
			gnttab_free_callback_list = callback;
		}
		callback = next;
	}
}

static inline void check_free_callbacks(void)
{
	if (unlikely(gnttab_free_callback_list))
		do_free_callbacks();
}

static void put_free_entry(grant_ref_t ref)
{
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = ref;
	gnttab_free_count++;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

/*
 * The following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
 * Introducing a valid entry into the grant table:
 *  1. Write ent->domid.
 *  2. Write ent->frame:
 *      GTF_permit_access:   Frame to which access is permitted.
 *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
 *                           frame, or zero if none.
 *  3. Write memory barrier (WMB).
 *  4. Write ent->flags, inc. valid type.
 */
static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v1[ref].domid = domid;
	gnttab_shared.v1[ref].frame = frame;
	wmb();
	gnttab_shared.v1[ref].flags = flags;
}

static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned int flags)
{
	gnttab_shared.v2[ref].hdr.domid = domid;
	gnttab_shared.v2[ref].full_page.frame = frame;
	wmb();	/* Hypervisor concurrent accesses. */
	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
}

/*
 * Public grant-issuing interface functions
 */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly)
{
	gnttab_interface->update_entry(ref, domid, frame,
				       GTF_permit_access |
				       (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;

	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
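
/*
 * Example (illustrative sketch only): a frontend typically grants one of
 * its own pages to its backend and revokes the grant when the I/O has
 * completed.  "otherend_id" stands for a hypothetical peer domain id
 * (e.g. read from xenstore); error handling is elided.
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *	int ref;
 *
 *	ref = gnttab_grant_foreign_access(otherend_id,
 *					  xen_page_to_gfn(page), 0);
 *	if (ref < 0)
 *		...			(no free grant entries)
 *
 *	(advertise ref to the peer, wait for it to finish, then:)
 *
 *	gnttab_end_foreign_access(ref, 0, (unsigned long)page_address(page));
 */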

static int gnttab_query_foreign_access_v1(grant_ref_t ref)
{
	return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
}

static int gnttab_query_foreign_access_v2(grant_ref_t ref)
{
	return grstatus[ref] & (GTF_reading|GTF_writing);
}

int gnttab_query_foreign_access(grant_ref_t ref)
{
	return gnttab_interface->query_foreign_access(ref);
}
EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);

static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
	u16 flags, nflags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;
	nflags = *pflags;
	do {
		flags = nflags;
		if (flags & (GTF_reading|GTF_writing))
			return 0;
	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);

	return 1;
}

static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
{
	gnttab_shared.v2[ref].hdr.flags = 0;
	mb();	/* Concurrent access by hypervisor. */
	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
		return 0;
	} else {
		/*
		 * The read of grstatus needs to have acquire semantics.
		 * On x86, reads already have that, and we just need to
		 * protect against compiler reorderings.
		 * On other architectures we may need a full barrier.
		 */
#ifdef CONFIG_X86
		barrier();
#else
		mb();
#endif
	}

	return 1;
}

static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	return gnttab_interface->end_foreign_access_ref(ref, readonly);
}

int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	if (_gnttab_end_foreign_access_ref(ref, readonly))
		return 1;
	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);

struct deferred_entry {
	struct list_head list;
	grant_ref_t ref;
	bool ro;
	uint16_t warn_delay;
	struct page *page;
};
static LIST_HEAD(deferred_list);
static void gnttab_handle_deferred(struct timer_list *);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);

static void gnttab_handle_deferred(struct timer_list *unused)
{
	unsigned int nr = 10;
	struct deferred_entry *first = NULL;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (nr--) {
		struct deferred_entry *entry
			= list_first_entry(&deferred_list,
					   struct deferred_entry, list);

		if (entry == first)
			break;
		list_del(&entry->list);
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
			put_free_entry(entry->ref);
			if (entry->page) {
				pr_debug("freeing g.e. %#x (pfn %#lx)\n",
					 entry->ref, page_to_pfn(entry->page));
				put_page(entry->page);
			} else
				pr_info("freeing g.e. %#x\n", entry->ref);
			kfree(entry);
			entry = NULL;
		} else {
			if (!--entry->warn_delay)
				pr_info("g.e. %#x still pending\n", entry->ref);
			if (!first)
				first = entry;
		}
		spin_lock_irqsave(&gnttab_list_lock, flags);
		if (entry)
			list_add_tail(&entry->list, &deferred_list);
		else if (list_empty(&deferred_list))
			break;
	}
	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
		deferred_timer.expires = jiffies + HZ;
		add_timer(&deferred_timer);
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
				struct page *page)
{
	struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	const char *what = KERN_WARNING "leaking";

	if (entry) {
		unsigned long flags;

		entry->ref = ref;
		entry->ro = readonly;
		entry->page = page;
		entry->warn_delay = 60;
		spin_lock_irqsave(&gnttab_list_lock, flags);
		list_add_tail(&entry->list, &deferred_list);
		if (!timer_pending(&deferred_timer)) {
			deferred_timer.expires = jiffies + HZ;
			add_timer(&deferred_timer);
		}
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		what = KERN_DEBUG "deferring";
	}
	printk("%s g.e. %#x (pfn %#lx)\n",
	       what, ref, page ? page_to_pfn(page) : -1);
}

void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
			       unsigned long page)
{
	if (gnttab_end_foreign_access_ref(ref, readonly)) {
		put_free_entry(ref);
		if (page != 0)
			put_page(virt_to_page(page));
	} else
		gnttab_add_deferred(ref, readonly,
				    page ? virt_to_page(page) : NULL);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);

int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;
	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);

void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
				       unsigned long pfn)
{
	gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
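
/*
 * Example (illustrative sketch only): page transfers are rarely used, but
 * the usual pattern is to offer a free local pfn, let the peer issue a
 * GNTTABOP_transfer into the reference, and then collect the new frame.
 * "otherend_id" and "pfn" are hypothetical:
 *
 *	int ref = gnttab_grant_foreign_transfer(otherend_id, pfn);
 *	... (peer transfers a page into ref) ...
 *	unsigned long frame = gnttab_end_foreign_transfer(ref);
 */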

static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
{
	unsigned long frame;
	u16 flags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;

	/*
	 * If a transfer is not even yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v1[ref].frame;
	BUG_ON(frame == 0);

	return frame;
}

static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
{
	unsigned long frame;
	u16 flags;
	u16 *pflags;

	pflags = &gnttab_shared.v2[ref].hdr.flags;

	/*
	 * If a transfer is not even yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v2[ref].full_page.frame;
	BUG_ON(frame == 0);

	return frame;
}

unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
	return gnttab_interface->end_foreign_transfer_ref(ref);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);

unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
{
	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);

	put_free_entry(ref);
	return frame;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);

void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);

void gnttab_free_grant_references(grant_ref_t head)
{
	grant_ref_t ref;
	unsigned long flags;
	int count = 1;

	if (head == GNTTAB_LIST_END)
		return;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	ref = head;
	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
		ref = gnttab_entry(ref);
		count++;
	}
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = head;
	gnttab_free_count += count;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);

int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
{
	int h = get_free_entries(count);

	if (h < 0)
		return -ENOSPC;

	*head = h;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);

int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
	return (*private_head == GNTTAB_LIST_END);
}
EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);

int gnttab_claim_grant_reference(grant_ref_t *private_head)
{
	grant_ref_t g = *private_head;

	if (unlikely(g == GNTTAB_LIST_END))
		return -ENOSPC;
	*private_head = gnttab_entry(g);
	return g;
}
EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);

void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release)
{
	gnttab_entry(release) = *private_head;
	*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
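
/*
 * Example (illustrative sketch only): drivers that need many references at
 * once usually reserve a private block up front and then claim references
 * from it without going back to the global free list.  "otherend_id" and
 * "gfn" are hypothetical; error handling is elided.
 *
 *	grant_ref_t head;
 *	int ref;
 *
 *	if (gnttab_alloc_grant_references(16, &head) < 0)
 *		...			(-ENOSPC)
 *
 *	ref = gnttab_claim_grant_reference(&head);
 *	gnttab_grant_foreign_access_ref(ref, otherend_id, gfn, 0);
 *	...
 *	gnttab_end_foreign_access_ref(ref, 0);
 *	gnttab_release_grant_reference(&head, ref);
 *
 *	gnttab_free_grant_references(head);
 */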

void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count)
{
	unsigned long flags;
	struct gnttab_free_callback *cb;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	/* Check if the callback is already on the list */
	cb = gnttab_free_callback_list;
	while (cb) {
		if (cb == callback)
			goto out;
		cb = cb->next;
	}

	callback->fn = fn;
	callback->arg = arg;
	callback->count = count;
	callback->next = gnttab_free_callback_list;
	gnttab_free_callback_list = callback;
	check_free_callbacks();
out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);

void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
{
	struct gnttab_free_callback **pcb;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
		if (*pcb == callback) {
			*pcb = callback->next;
			break;
		}
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
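
/*
 * Example (illustrative sketch only): when gnttab_alloc_grant_references()
 * fails with -ENOSPC, a driver can ask to be called back once enough
 * references have returned to the free list.  The callback runs with
 * gnttab_list_lock held, so it must not sleep.  "my_callback", "my_refill"
 * and "my_dev" are hypothetical driver objects:
 *
 *	static struct gnttab_free_callback my_callback;
 *
 *	static void my_refill(void *arg)
 *	{
 *		... kick the driver to retry its allocation ...
 *	}
 *
 *	gnttab_request_free_callback(&my_callback, my_refill, my_dev, 16);
 */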

static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
{
	return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
	       align;
}
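
/*
 * Worked example (illustrative only): gnttab_frames() rounds up the number
 * of pages needed to hold the bookkeeping for "frames" grant frames.  With
 * the v1 layout (512 grant refs per 4 KiB frame) and align == RPP (1024
 * refs per free-list page), gnttab_frames(3, RPP) == 2.
 */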

static int grow_gnttab_list(unsigned int more_frames)
{
	unsigned int new_nr_grant_frames, extra_entries, i;
	unsigned int nr_glist_frames, new_nr_glist_frames;
	unsigned int grefs_per_frame;

	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;

	new_nr_grant_frames = nr_grant_frames + more_frames;
	extra_entries = more_frames * grefs_per_frame;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
		if (!gnttab_list[i])
			goto grow_nomem;
	}

	for (i = grefs_per_frame * nr_grant_frames;
	     i < grefs_per_frame * new_nr_grant_frames - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(i) = gnttab_free_head;
	gnttab_free_head = grefs_per_frame * nr_grant_frames;
	gnttab_free_count += extra_entries;

	nr_grant_frames = new_nr_grant_frames;

	check_free_callbacks();

	return 0;

grow_nomem:
	while (i-- > nr_glist_frames)
		free_page((unsigned long)gnttab_list[i]);
	return -ENOMEM;
}

static unsigned int __max_nr_grant_frames(void)
{
	struct gnttab_query_size query;
	int rc;

	query.dom = DOMID_SELF;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
	if ((rc < 0) || (query.status != GNTST_okay))
		return 4; /* Legacy max supported number of frames */

	return query.max_nr_frames;
}

unsigned int gnttab_max_grant_frames(void)
{
	unsigned int xen_max = __max_nr_grant_frames();
	static unsigned int boot_max_nr_grant_frames;

	/* First time, initialize it properly. */
	if (!boot_max_nr_grant_frames)
		boot_max_nr_grant_frames = __max_nr_grant_frames();

	if (xen_max > boot_max_nr_grant_frames)
		return boot_max_nr_grant_frames;
	return xen_max;
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);

int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
{
	xen_pfn_t *pfn;
	unsigned int max_nr_gframes = __max_nr_grant_frames();
	unsigned int i;
	void *vaddr;

	if (xen_auto_xlat_grant_frames.count)
		return -EINVAL;

	vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
	if (vaddr == NULL) {
		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
			&addr);
		return -ENOMEM;
	}
	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
	if (!pfn) {
		xen_unmap(vaddr);
		return -ENOMEM;
	}
	for (i = 0; i < max_nr_gframes; i++)
		pfn[i] = XEN_PFN_DOWN(addr) + i;

	xen_auto_xlat_grant_frames.vaddr = vaddr;
	xen_auto_xlat_grant_frames.pfn = pfn;
	xen_auto_xlat_grant_frames.count = max_nr_gframes;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);

void gnttab_free_auto_xlat_frames(void)
{
	if (!xen_auto_xlat_grant_frames.count)
		return;
	kfree(xen_auto_xlat_grant_frames.pfn);
	xen_unmap(xen_auto_xlat_grant_frames.vaddr);

	xen_auto_xlat_grant_frames.pfn = NULL;
	xen_auto_xlat_grant_frames.count = 0;
	xen_auto_xlat_grant_frames.vaddr = NULL;
}
EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);

int gnttab_pages_set_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
#if BITS_PER_LONG < 64
		struct xen_page_foreign *foreign;

		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
		if (!foreign)
			return -ENOMEM;

		set_page_private(pages[i], (unsigned long)foreign);
#endif
		SetPagePrivate(pages[i]);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_pages_set_private);

/**
 * gnttab_alloc_pages - alloc pages suitable for grant mapping into
 * @nr_pages: number of pages to alloc
 * @pages: returns the pages
 */
int gnttab_alloc_pages(int nr_pages, struct page **pages)
{
	int ret;

	ret = alloc_xenballooned_pages(nr_pages, pages);
	if (ret < 0)
		return ret;

	ret = gnttab_pages_set_private(nr_pages, pages);
	if (ret < 0)
		gnttab_free_pages(nr_pages, pages);

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_pages);

void gnttab_pages_clear_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (PagePrivate(pages[i])) {
#if BITS_PER_LONG < 64
			kfree((void *)page_private(pages[i]));
#endif
			ClearPagePrivate(pages[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);

/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
void gnttab_free_pages(int nr_pages, struct page **pages)
{
	gnttab_pages_clear_private(nr_pages, pages);
	free_xenballooned_pages(nr_pages, pages);
}
EXPORT_SYMBOL_GPL(gnttab_free_pages);
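
/*
 * Example (illustrative sketch only): a backend that wants to map grants
 * from another domain first needs ballooned-out pages to map them into.
 * Error handling is elided:
 *
 *	struct page *pages[4];
 *
 *	if (gnttab_alloc_pages(4, pages))
 *		...			(-ENOMEM)
 *
 *	... map foreign grants into these pages, use them, unmap them ...
 *
 *	gnttab_free_pages(4, pages);
 */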

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/**
 * gnttab_dma_alloc_pages - alloc DMAable pages suitable for grant mapping into
 * @args: arguments to the function
 */
int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
{
	unsigned long pfn, start_pfn;
	size_t size;
	int i, ret;

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		args->vaddr = dma_alloc_coherent(args->dev, size,
						 &args->dev_bus_addr,
						 GFP_KERNEL | __GFP_NOWARN);
	else
		args->vaddr = dma_alloc_wc(args->dev, size,
					   &args->dev_bus_addr,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!args->vaddr) {
		pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
		return -ENOMEM;
	}

	start_pfn = __phys_to_pfn(args->dev_bus_addr);
	for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
			pfn++, i++) {
		struct page *page = pfn_to_page(pfn);

		args->pages[i] = page;
		args->frames[i] = xen_page_to_gfn(page);
		xenmem_reservation_scrub_page(page);
	}

	xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);

	ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to decrease reservation for DMA buffer\n");
		ret = -EFAULT;
		goto fail;
	}

	ret = gnttab_pages_set_private(args->nr_pages, args->pages);
	if (ret < 0)
		goto fail;

	return 0;

fail:
	gnttab_dma_free_pages(args);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);

/**
 * gnttab_dma_free_pages - free DMAable pages
 * @args: arguments to the function
 */
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
{
	size_t size;
	int i, ret;

	gnttab_pages_clear_private(args->nr_pages, args->pages);

	for (i = 0; i < args->nr_pages; i++)
		args->frames[i] = page_to_xen_pfn(args->pages[i]);

	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to increase reservation for DMA buffer\n");
		ret = -EFAULT;
	} else {
		ret = 0;
	}

	xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
					     args->frames);

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		dma_free_coherent(args->dev, size,
				  args->vaddr, args->dev_bus_addr);
	else
		dma_free_wc(args->dev, size,
			    args->vaddr, args->dev_bus_addr);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
#endif

/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
static inline void
gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
			const char *func)
{
	unsigned delay = 1;

	do {
		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
		if (*status == GNTST_eagain)
			msleep(delay++);
	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));

	if (delay >= MAX_DELAY) {
		pr_err("%s: %s eagain grant\n", func, current->comm);
		*status = GNTST_bad_page;
	}
}

void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
{
	struct gnttab_map_grant_ref *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_map);

void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
{
	struct gnttab_copy *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_copy);
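
/*
 * Example (illustrative sketch only): copy one granted page from a peer
 * domain into a local buffer, letting gnttab_batch_copy() transparently
 * retry GNTST_eagain (paged-out) targets.  "gref", "otherend_id" and "buf"
 * are hypothetical:
 *
 *	struct gnttab_copy op = {
 *		.source.u.ref = gref,
 *		.source.domid = otherend_id,
 *		.dest.u.gmfn  = virt_to_gfn(buf),
 *		.dest.domid   = DOMID_SELF,
 *		.len          = XEN_PAGE_SIZE,
 *		.flags        = GNTCOPY_source_gref,
 *	};
 *
 *	gnttab_batch_copy(&op, 1);
 *	if (op.status != GNTST_okay)
 *		...			(copy failed)
 */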

void gnttab_foreach_grant_in_range(struct page *page,
				   unsigned int offset,
				   unsigned int len,
				   xen_grant_fn_t fn,
				   void *data)
{
	unsigned int goffset;
	unsigned int glen;
	unsigned long xen_pfn;

	len = min_t(unsigned int, PAGE_SIZE - offset, len);
	goffset = xen_offset_in_page(offset);

	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);

	while (len) {
		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);

		goffset = 0;
		xen_pfn++;
		len -= glen;
	}
}
EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);

void gnttab_foreach_grant(struct page **pages,
			  unsigned int nr_grefs,
			  xen_grant_fn_t fn,
			  void *data)
{
	unsigned int goffset = 0;
	unsigned long xen_pfn = 0;
	unsigned int i;

	for (i = 0; i < nr_grefs; i++) {
		if ((i % XEN_PFN_PER_PAGE) == 0) {
			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
			goffset = 0;
		}

		fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);

		goffset += XEN_PAGE_SIZE;
		xen_pfn++;
	}
}

int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		switch (map_ops[i].status) {
		case GNTST_okay:
		{
			struct xen_page_foreign *foreign;

			SetPageForeign(pages[i]);
			foreign = xen_page_foreign(pages[i]);
			foreign->domid = map_ops[i].dom;
			foreign->gref = map_ops[i].ref;
			break;
		}

		case GNTST_no_device_space:
			pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
			break;

		case GNTST_eagain:
			/* Retry eagain maps */
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
						map_ops + i,
						&map_ops[i].status, __func__);
			/* Test status in next loop iteration. */
			i--;
			break;

		default:
			break;
		}
	}

	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);
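
/*
 * Example (illustrative sketch only, roughly the pattern used by backends
 * such as blkback): map a single foreign grant into a ballooned page.
 * "gref" and "otherend_id" are hypothetical; error handling is elided.
 *
 *	struct gnttab_map_grant_ref op;
 *	struct page *page;
 *
 *	gnttab_alloc_pages(1, &page);
 *	gnttab_set_map_op(&op,
 *			  (unsigned long)pfn_to_kaddr(page_to_pfn(page)),
 *			  GNTMAP_host_map, gref, otherend_id);
 *	gnttab_map_refs(&op, NULL, &page, 1);
 *	if (op.status != GNTST_okay)
 *		...			(map failed)
 */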

int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count)
{
	unsigned int i;
	int ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++)
		ClearPageForeign(pages[i]);

	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);

#define GNTTAB_UNMAP_REFS_DELAY 5

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);

static void gnttab_unmap_work(struct work_struct *work)
{
	struct gntab_unmap_queue_data
		*unmap_data = container_of(work,
					   struct gntab_unmap_queue_data,
					   gnttab_work.work);
	if (unmap_data->age != UINT_MAX)
		unmap_data->age++;
	__gnttab_unmap_refs_async(unmap_data);
}

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
	int ret;
	int pc;

	for (pc = 0; pc < item->count; pc++) {
		if (page_count(item->pages[pc]) > 1) {
			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);

			schedule_delayed_work(&item->gnttab_work,
					      msecs_to_jiffies(delay));
			return;
		}
	}

	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
				item->pages, item->count);
	item->done(ret, item);
}

void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
	item->age = 0;

	__gnttab_unmap_refs_async(item);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);

static void unmap_refs_callback(int result,
				struct gntab_unmap_queue_data *data)
{
	struct unmap_refs_callback_data *d = data->data;

	d->result = result;
	complete(&d->completion);
}

int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
{
	struct unmap_refs_callback_data data;

	init_completion(&data.completion);
	item->data = &data;
	item->done = &unmap_refs_callback;
	gnttab_unmap_refs_async(item);
	wait_for_completion(&data.completion);

	return data.result;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);

static unsigned int nr_status_frames(unsigned int nr_grant_frames)
{
	return gnttab_frames(nr_grant_frames, SPP);
}

static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
	int rc;

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}

static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
{
	uint64_t *sframes;
	unsigned int nr_sframes;
	struct gnttab_get_status_frames getframes;
	int rc;

	nr_sframes = nr_status_frames(nr_gframes);

	/*
	 * No need for kzalloc, as the array is fully initialized by the
	 * GNTTABOP_get_status_frames hypercall below.
	 */
	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
	if (!sframes)
		return -ENOMEM;

	getframes.dom = DOMID_SELF;
	getframes.nr_frames = nr_sframes;
	set_xen_guest_handle(getframes.frame_list, sframes);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
				       &getframes, 1);
	if (rc == -ENOSYS) {
		kfree(sframes);
		return -ENOSYS;
	}

	BUG_ON(rc || getframes.status);

	rc = arch_gnttab_map_status(sframes, nr_sframes,
				    nr_status_frames(gnttab_max_grant_frames()),
				    &grstatus);
	BUG_ON(rc);
	kfree(sframes);

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v2(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
}

static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	xen_pfn_t *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;

		rc = 0;
		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				pr_warn("grant table add_to_physmap failed, err=%d\n",
					rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	/*
	 * No need for kzalloc, as the array is fully initialized by the
	 * GNTTABOP_setup_table hypercall below.
	 */
	frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom = DOMID_SELF;
	setup.nr_frames = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

	rc = gnttab_interface->map_frames(frames, nr_gframes);

	kfree(frames);

	return rc;
}

static const struct gnttab_ops gnttab_v1_ops = {
	.version = 1,
	.grefs_per_grant_frame = XEN_PAGE_SIZE /
				 sizeof(struct grant_entry_v1),
	.map_frames = gnttab_map_frames_v1,
	.unmap_frames = gnttab_unmap_frames_v1,
	.update_entry = gnttab_update_entry_v1,
	.end_foreign_access_ref = gnttab_end_foreign_access_ref_v1,
	.end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v1,
	.query_foreign_access = gnttab_query_foreign_access_v1,
};

static const struct gnttab_ops gnttab_v2_ops = {
	.version = 2,
	.grefs_per_grant_frame = XEN_PAGE_SIZE /
				 sizeof(union grant_entry_v2),
	.map_frames = gnttab_map_frames_v2,
	.unmap_frames = gnttab_unmap_frames_v2,
	.update_entry = gnttab_update_entry_v2,
	.end_foreign_access_ref = gnttab_end_foreign_access_ref_v2,
	.end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2,
	.query_foreign_access = gnttab_query_foreign_access_v2,
};

static bool gnttab_need_v2(void)
{
#ifdef CONFIG_X86
	uint32_t base, width;

	if (xen_pv_domain()) {
		base = xen_cpuid_base();
		if (cpuid_eax(base) < 5)
			return false;	/* Information not available, use V1. */
		width = cpuid_ebx(base + 5) &
			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
		return width > 32 + PAGE_SHIFT;
	}
#endif
	return !!(max_possible_pfn >> 32);
}
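
/*
 * Note (worked example): v1 grant entries store the frame number in a
 * 32-bit field.  With 4 KiB pages (PAGE_SHIFT == 12) that covers machine
 * addresses up to 2^(32 + 12) = 16 TiB, so the v2 layout is only needed
 * once the reported machine address width exceeds 44 bits (or, for
 * non-PV guests, once PFNs no longer fit in 32 bits).
 */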

static void gnttab_request_version(void)
{
	long rc;
	struct gnttab_set_version gsv;

	if (gnttab_need_v2())
		gsv.version = 2;
	else
		gsv.version = 1;

	/* Boot parameter overrides automatic selection. */
	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
		gsv.version = xen_gnttab_version;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
	if (rc == 0 && gsv.version == 2)
		gnttab_interface = &gnttab_v2_ops;
	else
		gnttab_interface = &gnttab_v1_ops;
	pr_info("Grant tables using version %d layout\n",
		gnttab_interface->version);
}

static int gnttab_setup(void)
{
	unsigned int max_nr_gframes;

	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
		if (gnttab_shared.addr == NULL) {
			pr_warn("gnttab share frames is not mapped!\n");
			return -ENOMEM;
		}
	}
	return gnttab_map(0, nr_grant_frames - 1);
}

int gnttab_resume(void)
{
	gnttab_request_version();
	return gnttab_setup();
}

int gnttab_suspend(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		gnttab_interface->unmap_frames();
	return 0;
}

static int gnttab_expand(unsigned int req_entries)
{
	int rc;
	unsigned int cur, extra;

	cur = nr_grant_frames;
	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
		 gnttab_interface->grefs_per_grant_frame);
	if (cur + extra > gnttab_max_grant_frames()) {
		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
				    " cur=%u extra=%u limit=%u"
				    " gnttab_free_count=%u req_entries=%u\n",
				    cur, extra, gnttab_max_grant_frames(),
				    gnttab_free_count, req_entries);
		return -ENOSPC;
	}

	rc = gnttab_map(cur, cur + extra - 1);
	if (rc == 0)
		rc = grow_gnttab_list(extra);

	return rc;
}

int gnttab_init(void)
{
	int i;
	unsigned long max_nr_grant_frames;
	unsigned int max_nr_glist_frames, nr_glist_frames;
	unsigned int nr_init_grefs;
	int ret;

	gnttab_request_version();
	max_nr_grant_frames = gnttab_max_grant_frames();
	nr_grant_frames = 1;

	/* Determine the maximum number of frames required for the
	 * grant reference free list on the current hypervisor.
	 */
	max_nr_glist_frames = (max_nr_grant_frames *
			       gnttab_interface->grefs_per_grant_frame / RPP);
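	/*
	 * For example (illustrative numbers only): with the v1 layout this
	 * is 512 grant refs per frame, and RPP is 1024 refs per list page,
	 * so a hypervisor limit of 32 grant frames needs 16 list pages.
	 */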

	gnttab_list = kmalloc_array(max_nr_glist_frames,
				    sizeof(grant_ref_t *),
				    GFP_KERNEL);
	if (gnttab_list == NULL)
		return -ENOMEM;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	for (i = 0; i < nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
		if (gnttab_list[i] == NULL) {
			ret = -ENOMEM;
			goto ini_nomem;
		}
	}

	ret = arch_gnttab_init(max_nr_grant_frames,
			       nr_status_frames(max_nr_grant_frames));
	if (ret < 0)
		goto ini_nomem;

	if (gnttab_setup() < 0) {
		ret = -ENODEV;
		goto ini_nomem;
	}

	nr_init_grefs = nr_grant_frames *
			gnttab_interface->grefs_per_grant_frame;

	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
	gnttab_free_head = NR_RESERVED_ENTRIES;

	printk("Grant table initialized\n");
	return 0;

ini_nomem:
	for (i--; i >= 0; i--)
		free_page((unsigned long)gnttab_list[i]);
	kfree(gnttab_list);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_init);

static int __gnttab_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain() && !xen_pvh_domain())
		return 0;

	return gnttab_init();
}
/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
 * beforehand to initialize xen_auto_xlat_grant_frames. */
core_initcall_sync(__gnttab_init);