/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_vm.h"
#include "amdgpu_job.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"

#define AMDGPU_VM_SDMA_MIN_NUM_DW	256u
#define AMDGPU_VM_SDMA_MAX_NUM_DW	(16u * 1024u)

/**
 * amdgpu_vm_sdma_map_table - make sure new PDs/PTs are GTT mapped
 *
 * @table: newly allocated or validated PD/PT
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
{
	int r;

	r = amdgpu_ttm_alloc_gart(&table->tbo);
	if (r)
		return r;

	if (table->shadow)
		r = amdgpu_ttm_alloc_gart(&table->shadow->tbo);

	return r;
}

/**
 * amdgpu_vm_sdma_prepare - prepare SDMA command submission
 *
 * @p: see amdgpu_vm_update_params definition
 * @resv: reservation object with embedded fence(s) to sync to
 * @sync_mode: synchronization mode
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
				  struct dma_resv *resv,
				  enum amdgpu_sync_mode sync_mode)
{
	enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
		: AMDGPU_IB_POOL_DELAYED;
	unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
	int r;

	r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, &p->job);
	if (r)
		return r;

	p->num_dw_left = ndw;

	if (!resv)
		return 0;

	return amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, p->vm);
}
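
/*
 * Usage sketch (illustrative, not part of the driver): the callbacks in
 * this file are normally driven through vm->update_funcs. The flow is
 * roughly the following; the local variable names (adev, vm, resv,
 * sync_mode, bo, pe, addr, count, incr, flags) are assumptions for the
 * example only:
 *
 *	struct amdgpu_vm_update_params params = {};
 *	struct dma_fence *fence = NULL;
 *	int r;
 *
 *	params.adev = adev;
 *	params.vm = vm;
 *	params.immediate = immediate;
 *
 *	r = vm->update_funcs->prepare(&params, resv, sync_mode);
 *	if (r)
 *		return r;
 *	r = vm->update_funcs->update(&params, bo, pe, addr, count,
 *				     incr, flags);
 *	if (r)
 *		return r;
 *	return vm->update_funcs->commit(&params, &fence);
 */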

/**
 * amdgpu_vm_sdma_commit - commit SDMA command submission
 *
 * @p: see amdgpu_vm_update_params definition
 * @fence: resulting fence
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
				 struct dma_fence **fence)
{
	struct amdgpu_ib *ib = p->job->ibs;
	struct drm_sched_entity *entity;
	struct amdgpu_ring *ring;
	struct dma_fence *f;
	int r;

	entity = p->immediate ? &p->vm->immediate : &p->vm->delayed;
	ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);

	WARN_ON(ib->length_dw == 0);
	amdgpu_ring_pad_ib(ring, ib);
	WARN_ON(ib->length_dw > p->num_dw_left);
	r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f);
	if (r)
		goto error;

	if (p->unlocked) {
		/* Publish the new fence as last_unlocked and drop the
		 * previous one; swapping with tmp keeps f pointing at the
		 * new fence so it can still be returned to the caller.
		 */
		struct dma_fence *tmp = dma_fence_get(f);

		swap(p->vm->last_unlocked, tmp);
		dma_fence_put(tmp);
	} else {
		amdgpu_bo_fence(p->vm->root.base.bo, f, true);
	}

	if (fence && !p->immediate)
		swap(*fence, f);
	dma_fence_put(f);
	return 0;

error:
	amdgpu_job_free(p->job);
	return r;
}

/**
 * amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @count: number of page entries to copy
 *
 * Traces the parameters and calls the DMA function to copy the PTEs.
 */
static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
				     struct amdgpu_bo *bo, uint64_t pe,
				     unsigned count)
{
	struct amdgpu_ib *ib = p->job->ibs;
	uint64_t src = ib->gpu_addr;

	src += p->num_dw_left * 4;

	pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
	trace_amdgpu_vm_copy_ptes(pe, src, count, p->immediate);

	amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
}

/**
 * amdgpu_vm_sdma_set_ptes - helper to call the right asic function
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic function to set up the
 * page table using the DMA engine.
 */
static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
				    struct amdgpu_bo *bo, uint64_t pe,
				    uint64_t addr, unsigned count,
				    uint32_t incr, uint64_t flags)
{
	struct amdgpu_ib *ib = p->job->ibs;

	pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
	if (count < 3) {
		amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
				    count, incr);
	} else {
		amdgpu_vm_set_pte_pde(p->adev, ib, pe, addr,
				      count, incr, flags);
	}
}

/**
 * amdgpu_vm_sdma_update - execute VM update
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Reserve space in the IB, set up the mapping buffer on demand and write the
 * commands to the IB.
 */
static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
				 struct amdgpu_bo *bo, uint64_t pe,
				 uint64_t addr, unsigned count, uint32_t incr,
				 uint64_t flags)
{
	enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
		: AMDGPU_IB_POOL_DELAYED;
	unsigned int i, ndw, nptes;
	uint64_t *pte;
	int r;

	/* Wait for PD/PT moves to be completed */
	r = amdgpu_sync_fence(&p->job->sync, bo->tbo.moving, false);
	if (r)
		return r;

	do {
		ndw = p->num_dw_left;
		ndw -= p->job->ibs->length_dw;

		if (ndw < 32) {
			/* Not enough room left, submit what we have and
			 * allocate a new IB for the remainder.
			 */
			r = amdgpu_vm_sdma_commit(p, NULL);
			if (r)
				return r;

			/* estimate how many dw we need */
			ndw = 32;
			if (p->pages_addr)
				ndw += count * 2;
			ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
			ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);

			r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool,
						     &p->job);
			if (r)
				return r;

			p->num_dw_left = ndw;
		}

		if (!p->pages_addr) {
			/* set page commands needed */
			if (bo->shadow)
				amdgpu_vm_sdma_set_ptes(p, bo->shadow, pe, addr,
							count, incr, flags);
			amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
						incr, flags);
			return 0;
		}

		/* copy commands needed */
		ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
			(bo->shadow ? 2 : 1);

		/* for padding */
		ndw -= 7;

		nptes = min(count, ndw / 2);

		/* Put the PTEs at the end of the IB. */
		p->num_dw_left -= nptes * 2;
		pte = (uint64_t *)&(p->job->ibs->ptr[p->num_dw_left]);
		for (i = 0; i < nptes; ++i, addr += incr) {
			pte[i] = amdgpu_vm_map_gart(p->pages_addr, addr);
			pte[i] |= flags;
		}

		if (bo->shadow)
			amdgpu_vm_sdma_copy_ptes(p, bo->shadow, pe, nptes);
		amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);

		pe += nptes * 8;
		count -= nptes;
	} while (count);

	return 0;
}
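
/*
 * Worked example for the copy path above, under illustrative
 * assumptions: a fresh IB of AMDGPU_VM_SDMA_MIN_NUM_DW, no shadow BO,
 * and copy_pte_num_dw == 7 (the value used by the SDMA v4 backend):
 *
 *	ndw    = 256			whole IB budget in DWs
 *	ndw   -= 7			one copy packet for the table
 *	ndw   -= 7			reserved for ring padding
 *	nptes  = min(count, 242 / 2)	at most 121 PTEs per pass
 *
 * Each PTE is 64 bit, i.e. two DWs, and is staged at the tail of the
 * same IB, which is why nptes is bounded by ndw / 2: the command
 * packets grow from the front (ib->length_dw) while the PTE payload
 * grows down from the back (p->num_dw_left), and the two must never
 * meet.
 */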

const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs = {
	.map_table = amdgpu_vm_sdma_map_table,
	.prepare = amdgpu_vm_sdma_prepare,
	.update = amdgpu_vm_sdma_update,
	.commit = amdgpu_vm_sdma_commit
};
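
/*
 * Selection sketch (simplified): amdgpu_vm_init() in amdgpu_vm.c picks
 * between the CPU and SDMA backends roughly like this:
 *
 *	if (vm->use_cpu_for_update)
 *		vm->update_funcs = &amdgpu_vm_cpu_funcs;
 *	else
 *		vm->update_funcs = &amdgpu_vm_sdma_funcs;
 */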