/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_vm.h"
#include "amdgpu_job.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"

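/* Lower and upper bound on the number of dw the update IBs are allocated with */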
#define AMDGPU_VM_SDMA_MIN_NUM_DW	256u
#define AMDGPU_VM_SDMA_MAX_NUM_DW	(16u * 1024u)

/**
 * amdgpu_vm_sdma_map_table - make sure new PDs/PTs are GTT mapped
 *
 * @table: newly allocated or validated PD/PT
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
{
	int r;

	r = amdgpu_ttm_alloc_gart(&table->tbo);
	if (r)
		return r;

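	/* The shadow copy of the table needs to be GART mapped as well */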
	if (table->shadow)
		r = amdgpu_ttm_alloc_gart(&table->shadow->tbo);

	return r;
}

/**
 * amdgpu_vm_sdma_prepare - prepare SDMA command submission
 *
 * @p: see amdgpu_vm_update_params definition
 * @resv: reservation object with the fences to sync to
 * @sync_mode: synchronization mode
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
				  struct dma_resv *resv,
				  enum amdgpu_sync_mode sync_mode)
{
	unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
	int r;

	r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
	if (r)
		return r;

	p->num_dw_left = ndw;

	if (!resv)
		return 0;

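	/* Sync the job to the fences in the reservation object according to sync_mode */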
	return amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, p->vm);
}

/**
 * amdgpu_vm_sdma_commit - commit SDMA command submission
 *
 * @p: see amdgpu_vm_update_params definition
 * @fence: resulting fence
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
				 struct dma_fence **fence)
{
	struct amdgpu_ib *ib = p->job->ibs;
	struct drm_sched_entity *entity;
	struct dma_fence *f, *tmp;
	struct amdgpu_ring *ring;
	int r;

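	/*
	 * Direct updates are submitted through their own scheduler entity and
	 * so don't have to wait for other delayed VM updates.
	 */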
	entity = p->direct ? &p->vm->direct : &p->vm->delayed;
	ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);

	WARN_ON(ib->length_dw == 0);
	amdgpu_ring_pad_ib(ring, ib);
	WARN_ON(ib->length_dw > p->num_dw_left);
	r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f);
	if (r)
		goto error;

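	/*
	 * Direct submissions are remembered in vm->last_direct, delayed ones
	 * are added to the root PD reservation object instead.
	 */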
	if (p->direct) {
		tmp = dma_fence_get(f);
		swap(p->vm->last_direct, tmp);
		dma_fence_put(tmp);
	} else {
		dma_resv_add_shared_fence(p->vm->root.base.bo->tbo.base.resv, f);
	}

	if (fence && !p->direct)
		swap(*fence, f);
	dma_fence_put(f);
	return 0;

error:
	amdgpu_job_free(p->job);
	return r;
}

/**
 * amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @count: number of page entries to copy
 *
 * Traces the parameters and calls the DMA function to copy the PTEs.
 */
static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
				     struct amdgpu_bo *bo, uint64_t pe,
				     unsigned count)
{
	struct amdgpu_ib *ib = p->job->ibs;
	uint64_t src = ib->gpu_addr;

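	/*
	 * The source PTEs were staged at the unused end of the IB by
	 * amdgpu_vm_sdma_update().
	 */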
	src += p->num_dw_left * 4;

	pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
	trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct);

	amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
}

/**
 * amdgpu_vm_sdma_set_ptes - helper to call the right asic function
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
				    struct amdgpu_bo *bo, uint64_t pe,
				    uint64_t addr, unsigned count,
				    uint32_t incr, uint64_t flags)
{
	struct amdgpu_ib *ib = p->job->ibs;

	pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
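	/*
	 * For only a few entries writing the values directly into the IB is
	 * cheaper than a set_pte_pde command.
	 */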
	if (count < 3) {
		amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
				    count, incr);
	} else {
		amdgpu_vm_set_pte_pde(p->adev, ib, pe, addr,
				      count, incr, flags);
	}
}

/**
 * amdgpu_vm_sdma_update - execute VM update
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Reserve space in the IB, setup mapping buffer on demand and write commands to
 * the IB.
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
				 struct amdgpu_bo *bo, uint64_t pe,
				 uint64_t addr, unsigned count, uint32_t incr,
				 uint64_t flags)
{
	unsigned int i, ndw, nptes;
	uint64_t *pte;
	int r;

	/* Wait for PD/PT moves to be completed */
	r = amdgpu_sync_fence(&p->job->sync, bo->tbo.moving, false);
	if (r)
		return r;

	do {
		ndw = p->num_dw_left;
		ndw -= p->job->ibs->length_dw;

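		/*
		 * Not enough space left in the current IB? Flush it and
		 * allocate a new one.
		 */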
		if (ndw < 32) {
			r = amdgpu_vm_sdma_commit(p, NULL);
			if (r)
				return r;

			/* estimate how many dw we need */
			ndw = 32;
			if (p->pages_addr)
				ndw += count * 2;
			ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
			ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);

			r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
			if (r)
				return r;

			p->num_dw_left = ndw;
		}

		if (!p->pages_addr) {
			/* set page commands needed */
			if (bo->shadow)
				amdgpu_vm_sdma_set_ptes(p, bo->shadow, pe, addr,
							count, incr, flags);
			amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
						incr, flags);
			return 0;
		}

		/* copy commands needed */
		ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
			(bo->shadow ? 2 : 1);

		/* for padding */
		ndw -= 7;

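		/* Each PTE is 64bit and so needs two dw of IB space */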
		nptes = min(count, ndw / 2);

		/* Put the PTEs at the end of the IB. */
		p->num_dw_left -= nptes * 2;
		pte = (uint64_t *)&(p->job->ibs->ptr[p->num_dw_left]);
		for (i = 0; i < nptes; ++i, addr += incr) {
			pte[i] = amdgpu_vm_map_gart(p->pages_addr, addr);
			pte[i] |= flags;
		}

		if (bo->shadow)
			amdgpu_vm_sdma_copy_ptes(p, bo->shadow, pe, nptes);
		amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);

		pe += nptes * 8;
		count -= nptes;
	} while (count);

	return 0;
}

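/* Backend callbacks for updating page tables with the SDMA */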
const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs = {
	.map_table = amdgpu_vm_sdma_map_table,
	.prepare = amdgpu_vm_sdma_prepare,
	.update = amdgpu_vm_sdma_update,
	.commit = amdgpu_vm_sdma_commit
};