--- mmu.c	(e0c1b8f9eba88173b30ba42eb492fd20582cf376)
+++ mmu.c	(92abe0f81e1385afd8f1dc66206b5be9a514899b)
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
  */
 
 #include <linux/mman.h>
 #include <linux/kvm_host.h>

--- 443 unchanged lines hidden ---

 						 prot);
 		if (err)
 			return err;
 	}
 
 	return 0;
 }
 
-static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
-					unsigned long *haddr,
-					enum kvm_pgtable_prot prot)
+
+/**
+ * hyp_alloc_private_va_range - Allocates a private VA range.
+ * @size:	The size of the VA range to reserve.
+ * @haddr:	The hypervisor virtual start address of the allocation.
+ *
+ * The private virtual address (VA) range is allocated below io_map_base
+ * and aligned based on the order of @size.
+ *
+ * Return: 0 on success or negative error code on failure.
+ */
+int hyp_alloc_private_va_range(size_t size, unsigned long *haddr)
 {
 	unsigned long base;
 	int ret = 0;
 
-	if (!kvm_host_owns_hyp_mappings()) {
-		base = kvm_call_hyp_nvhe(__pkvm_create_private_mapping,
-					 phys_addr, size, prot);
-		if (IS_ERR_OR_NULL((void *)base))
-			return PTR_ERR((void *)base);
-		*haddr = base;
-
-		return 0;
-	}
-
 	mutex_lock(&kvm_hyp_pgd_mutex);
 
 	/*
 	 * This assumes that we have enough space below the idmap
 	 * page to allocate our VAs. If not, the check below will
 	 * kick. A potential alternative would be to detect that
 	 * overflow and switch to an allocation above the idmap.
 	 *
 	 * The allocated size is always a multiple of PAGE_SIZE.
 	 */
-	size = PAGE_ALIGN(size + offset_in_page(phys_addr));
-	base = io_map_base - size;
+	base = io_map_base - PAGE_ALIGN(size);
 
+	/* Align the allocation based on the order of its size */
+	base = ALIGN_DOWN(base, PAGE_SIZE << get_order(size));
+
 	/*
 	 * Verify that BIT(VA_BITS - 1) hasn't been flipped by
 	 * allocating the new area, as it would indicate we've
 	 * overflowed the idmap/IO address range.
 	 */
 	if ((base ^ io_map_base) & BIT(VA_BITS - 1))
 		ret = -ENOMEM;
 	else
-		io_map_base = base;
+		*haddr = io_map_base = base;
 
 	mutex_unlock(&kvm_hyp_pgd_mutex);
 
+	return ret;
+}
+
+static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
+					unsigned long *haddr,
+					enum kvm_pgtable_prot prot)
+{
+	unsigned long addr;
+	int ret = 0;
+
+	if (!kvm_host_owns_hyp_mappings()) {
+		addr = kvm_call_hyp_nvhe(__pkvm_create_private_mapping,
+					 phys_addr, size, prot);
+		if (IS_ERR_VALUE(addr))
+			return addr;
+		*haddr = addr;
+
+		return 0;
+	}
+
+	size = PAGE_ALIGN(size + offset_in_page(phys_addr));
+	ret = hyp_alloc_private_va_range(size, &addr);
 	if (ret)
-		goto out;
+		return ret;
 
-	ret = __create_hyp_mappings(base, size, phys_addr, prot);
+	ret = __create_hyp_mappings(addr, size, phys_addr, prot);
 	if (ret)
-		goto out;
+		return ret;
 
-	*haddr = base + offset_in_page(phys_addr);
-out:
+	*haddr = addr + offset_in_page(phys_addr);
 	return ret;
 }
 
 /**
  * create_hyp_io_mappings - Map IO into both kernel and HYP
  * @phys_addr:	The physical start address which gets mapped
  * @size:	Size of the region being mapped
  * @kaddr:	Kernel VA for this mapping

--- 1263 unchanged lines hidden ---
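
The new hyp_alloc_private_va_range() grows the private region downward from io_map_base, rounds the base down to the alignment implied by the allocation's order, and treats a flip of BIT(VA_BITS - 1) as overflow of the idmap/IO range. Below is a minimal user-space sketch of that scheme; PAGE_SHIFT, VA_BITS, the starting io_map_base value and the helper macros are illustrative stand-ins, not the kernel definitions.

#include <errno.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define VA_BITS		48
#define BIT(n)		(1UL << (n))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((unsigned long)(a) - 1))

/* Simplified get_order(): smallest n such that (PAGE_SIZE << n) >= size. */
static unsigned int get_order(size_t size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

/* Illustrative starting point for the private VA region. */
static unsigned long io_map_base = BIT(VA_BITS - 1) + 0x200000UL;

/* Mirrors the shape of hyp_alloc_private_va_range(): allocate below
 * io_map_base, aligned to the order of the requested size. */
static int alloc_private_va_range(size_t size, unsigned long *haddr)
{
	unsigned long base = io_map_base - PAGE_ALIGN(size);

	/* Align the allocation based on the order of its size. */
	base = ALIGN_DOWN(base, PAGE_SIZE << get_order(size));

	/* A flip of BIT(VA_BITS - 1) means the usable range was exhausted. */
	if ((base ^ io_map_base) & BIT(VA_BITS - 1))
		return -ENOMEM;

	*haddr = io_map_base = base;
	return 0;
}

int main(void)
{
	unsigned long va;

	/* A 3-page request gets an order-2 alignment (16 KiB with 4 KiB pages). */
	if (!alloc_private_va_range(3 * PAGE_SIZE, &va))
		printf("allocated VA range at %#lx\n", va);
	return 0;
}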
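
__create_hyp_private_mapping() still pads the requested size by the sub-page offset of phys_addr before reserving the VA range, then adds that offset back to the address returned to the caller. A small sketch of that arithmetic, assuming a 4 KiB page size and hypothetical input values:

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define PAGE_ALIGN(x)		(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define offset_in_page(p)	((unsigned long)(p) & (PAGE_SIZE - 1))

int main(void)
{
	unsigned long phys_addr = 0x9000100UL;	/* not page aligned */
	unsigned long size = 0x1000UL;		/* one page requested */
	unsigned long va = 0x7fff0000UL;	/* pretend start of the reserved range */

	/* Grow the mapping so [phys_addr, phys_addr + size) is fully covered. */
	unsigned long map_size = PAGE_ALIGN(size + offset_in_page(phys_addr));

	/* Hand the caller the VA that corresponds to phys_addr itself. */
	unsigned long haddr = va + offset_in_page(phys_addr);

	printf("map %#lx bytes, return VA %#lx\n", map_size, haddr);
	return 0;
}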