/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_vm.h"
#include "amdgpu_job.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"

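/* Lower and upper bounds on the number of DWs allocated for one update IB */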
#define AMDGPU_VM_SDMA_MIN_NUM_DW	256u
#define AMDGPU_VM_SDMA_MAX_NUM_DW	(16u * 1024u)

/**
 * amdgpu_vm_sdma_map_table - make sure new PDs/PTs are GTT mapped
 *
 * @table: newly allocated or validated PD/PT
 */
static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
{
	int r;

	r = amdgpu_ttm_alloc_gart(&table->bo.tbo);
	if (r)
		return r;

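	/* The shadow BO, if present, receives the same updates and needs a GART mapping as well */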
	if (table->shadow)
		r = amdgpu_ttm_alloc_gart(&table->shadow->tbo);

	return r;
}

/* Allocate a new job for @count PTE updates */
static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
				    unsigned int count)
{
	enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
		: AMDGPU_IB_POOL_DELAYED;
	struct drm_sched_entity *entity = p->immediate ? &p->vm->immediate
		: &p->vm->delayed;
	unsigned int ndw;
	int r;

	/* estimate how many dw we need */
	ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
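	/*
	 * PTEs copied through a GART mapping table are staged as 64-bit
	 * values at the tail of the IB, i.e. two extra DWs per entry.
	 */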
	if (p->pages_addr)
		ndw += count * 2;
	ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);

	r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM,
				     ndw * 4, pool, &p->job);
	if (r)
		return r;

	p->num_dw_left = ndw;
	return 0;
}

/**
 * amdgpu_vm_sdma_prepare - prepare SDMA command submission
 *
 * @p: see amdgpu_vm_update_params definition
 * @resv: reservation object with embedded fence
 * @sync_mode: synchronization mode
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
				  struct dma_resv *resv,
				  enum amdgpu_sync_mode sync_mode)
{
	struct amdgpu_sync sync;
	int r;

	r = amdgpu_vm_sdma_alloc_job(p, 0);
	if (r)
		return r;

	if (!resv)
		return 0;

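	/*
	 * Gather the fences from the reservation object according to
	 * sync_mode and add them as dependencies of the update job.
	 */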
	amdgpu_sync_create(&sync);
	r = amdgpu_sync_resv(p->adev, &sync, resv, sync_mode, p->vm);
	if (!r)
		r = amdgpu_sync_push_to_job(&sync, p->job);
	amdgpu_sync_free(&sync);

	if (r) {
		p->num_dw_left = 0;
		amdgpu_job_free(p->job);
	}
	return r;
}

/**
 * amdgpu_vm_sdma_commit - commit SDMA command submission
 *
 * @p: see amdgpu_vm_update_params definition
 * @fence: resulting fence
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
				 struct dma_fence **fence)
{
	struct amdgpu_ib *ib = p->job->ibs;
	struct amdgpu_ring *ring;
	struct dma_fence *f;

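	/* The ring is only needed to pad the IB to its alignment requirements */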
	ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring,
			    sched);

	WARN_ON(ib->length_dw == 0);
	amdgpu_ring_pad_ib(ring, ib);
	WARN_ON(ib->length_dw > p->num_dw_left);
	f = amdgpu_job_submit(p->job);

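	/*
	 * Unlocked updates remember their fence in vm->last_unlocked, all
	 * other updates are added to the root PD reservation as bookkeeping.
	 */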
	if (p->unlocked) {
		struct dma_fence *tmp = dma_fence_get(f);

		swap(p->vm->last_unlocked, tmp);
		dma_fence_put(tmp);
	} else {
		dma_resv_add_fence(p->vm->root.bo->tbo.base.resv, f,
				   DMA_RESV_USAGE_BOOKKEEP);
	}

	if (fence && !p->immediate) {
		/*
		 * Most hw generations now have a separate queue for page table
		 * updates, but when the queue is shared with userspace we need
		 * the extra CPU round trip to correctly flush the TLB.
		 */
		set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &f->flags);
		swap(*fence, f);
	}
	dma_fence_put(f);
	return 0;
}

/**
 * amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @count: number of page entries to copy
 *
 * Traces the parameters and calls the DMA function to copy the PTEs.
 */
static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
				     struct amdgpu_bo *bo, uint64_t pe,
				     unsigned count)
{
	struct amdgpu_ib *ib = p->job->ibs;
	uint64_t src = ib->gpu_addr;

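	/* The PTE values were staged at the tail of the IB by amdgpu_vm_sdma_update() */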
	src += p->num_dw_left * 4;

	pe += amdgpu_bo_gpu_offset_no_check(bo);
	trace_amdgpu_vm_copy_ptes(pe, src, count, p->immediate);

	amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
}

/**
 * amdgpu_vm_sdma_set_ptes - helper to call the right asic function
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
				    struct amdgpu_bo *bo, uint64_t pe,
				    uint64_t addr, unsigned count,
				    uint32_t incr, uint64_t flags)
{
	struct amdgpu_ib *ib = p->job->ibs;

	pe += amdgpu_bo_gpu_offset_no_check(bo);
	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
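	/*
	 * Writing a handful of entries directly avoids the setup cost of a
	 * full set_pte_pde packet, which only pays off for longer runs.
	 */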
	if (count < 3) {
		amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
				    count, incr);
	} else {
		amdgpu_vm_set_pte_pde(p->adev, ib, pe, addr,
				      count, incr, flags);
	}
}

/**
 * amdgpu_vm_sdma_update - execute VM update
 *
 * @p: see amdgpu_vm_update_params definition
 * @vmbo: PD/PT to update
 * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Reserve space in the IB, set up the mapping buffer on demand and write
 * commands to the IB.
 */
static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
				 struct amdgpu_bo_vm *vmbo, uint64_t pe,
				 uint64_t addr, unsigned count, uint32_t incr,
				 uint64_t flags)
{
	struct amdgpu_bo *bo = &vmbo->bo;
	struct dma_resv_iter cursor;
	unsigned int i, ndw, nptes;
	struct dma_fence *fence;
	uint64_t *pte;
	int r;

	/* Wait for PD/PT moves to be completed */
	dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		dma_fence_get(fence);
		r = drm_sched_job_add_dependency(&p->job->base, fence);
		if (r) {
			dma_fence_put(fence);
			dma_resv_iter_end(&cursor);
			return r;
		}
	}
	dma_resv_iter_end(&cursor);

	do {
		ndw = p->num_dw_left;
		ndw -= p->job->ibs->length_dw;

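		/* Not enough room left in this IB: submit it and start a fresh job */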
		if (ndw < 32) {
			r = amdgpu_vm_sdma_commit(p, NULL);
			if (r)
				return r;

			r = amdgpu_vm_sdma_alloc_job(p, count);
			if (r)
				return r;
		}

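		/*
		 * Without a GART mapping table the remaining range can be
		 * written with set_ptes commands in one go.
		 */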
		if (!p->pages_addr) {
			/* set page commands needed */
			if (vmbo->shadow)
				amdgpu_vm_sdma_set_ptes(p, vmbo->shadow, pe, addr,
							count, incr, flags);
			amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
						incr, flags);
			return 0;
		}

		/* copy commands needed */
		ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
			(vmbo->shadow ? 2 : 1);

		/* for padding */
		ndw -= 7;

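		/* Each staged PTE value occupies two DWs at the tail of the IB */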
		nptes = min(count, ndw / 2);

		/* Put the PTEs at the end of the IB. */
		p->num_dw_left -= nptes * 2;
		pte = (uint64_t *)&(p->job->ibs->ptr[p->num_dw_left]);
		for (i = 0; i < nptes; ++i, addr += incr) {
			pte[i] = amdgpu_vm_map_gart(p->pages_addr, addr);
			pte[i] |= flags;
		}

		if (vmbo->shadow)
			amdgpu_vm_sdma_copy_ptes(p, vmbo->shadow, pe, nptes);
		amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);

		pe += nptes * 8;
		count -= nptes;
	} while (count);

	return 0;
}

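/* Callbacks used by the VM code when page table updates go through SDMA */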
const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs = {
	.map_table = amdgpu_vm_sdma_map_table,
	.prepare = amdgpu_vm_sdma_prepare,
	.update = amdgpu_vm_sdma_update,
	.commit = amdgpu_vm_sdma_commit
};