/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_vm.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"

/**
 * amdgpu_vm_cpu_map_table - make sure new PDs/PTs are kmapped
 *
 * @table: newly allocated or validated PD/PT
 */
static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
{
	table->bo.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	return amdgpu_bo_kmap(&table->bo, NULL);
}

/**
 * amdgpu_vm_cpu_prepare - prepare page table update with the CPU
 *
 * @p: see amdgpu_vm_update_params definition
 * @resv: reservation object with embedded fence
 * @sync_mode: synchronization mode
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
				 struct dma_resv *resv,
				 enum amdgpu_sync_mode sync_mode)
{
	if (!resv)
		return 0;

	return amdgpu_bo_sync_wait_resv(p->adev, resv, sync_mode, p->vm, true);
}

/**
 * amdgpu_vm_cpu_update - helper to update page tables via CPU
 *
 * @p: see amdgpu_vm_update_params definition
 * @vmbo: PD/PT to update
 * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Write count number of PT/PD entries directly.
 */
static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
				struct amdgpu_bo_vm *vmbo, uint64_t pe,
				uint64_t addr, unsigned count, uint32_t incr,
				uint64_t flags)
{
	unsigned int i;
	uint64_t value;
	long r;

	r = dma_resv_wait_timeout(vmbo->bo.tbo.base.resv, DMA_RESV_USAGE_KERNEL,
				  true, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	pe += (unsigned long)amdgpu_bo_kptr(&vmbo->bo);

	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);

	for (i = 0; i < count; i++) {
		value = p->pages_addr ?
			amdgpu_vm_map_gart(p->pages_addr, addr) :
			addr;
		amdgpu_gmc_set_pte_pde(p->adev, (void *)(uintptr_t)pe,
				       i, value, flags);
		addr += incr;
	}
	return 0;
}

/**
 * amdgpu_vm_cpu_commit - commit page table update to the HW
 *
 * @p: see amdgpu_vm_update_params definition
 * @fence: unused
 *
 * Make sure that the hardware sees the page table updates.
 */
static int amdgpu_vm_cpu_commit(struct amdgpu_vm_update_params *p,
				struct dma_fence **fence)
{
	/* Flush HDP */
	mb();
	amdgpu_device_flush_hdp(p->adev, NULL);
	return 0;
}

const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs = {
	.map_table = amdgpu_vm_cpu_map_table,
	.prepare = amdgpu_vm_cpu_prepare,
	.update = amdgpu_vm_cpu_update,
	.commit = amdgpu_vm_cpu_commit
};
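/*
 * Usage sketch (illustrative, not part of this file): the function table
 * above is one of two interchangeable page-table update backends.  The VM
 * core picks it at VM creation time when page tables can be updated from
 * the CPU, and otherwise falls back to the SDMA backend.  Roughly, the
 * caller side in amdgpu_vm.c looks like the following (exact placement in
 * the init path may differ between kernel versions):
 *
 *	if (vm->use_cpu_for_update)
 *		vm->update_funcs = &amdgpu_vm_cpu_funcs;
 *	else
 *		vm->update_funcs = &amdgpu_vm_sdma_funcs;
 *
 * All later updates then go through the common prepare/update/commit
 * callbacks without the callers knowing which backend is in use.
 */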