xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c (revision e5f586c763a079349398e2b0c7c271386193ac34)
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"

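/**
 * amdgpu_allocate_static_csa() - allocate and clear the static CSA
 * @adev:	amdgpu device.
 *
 * Creates the CSA (Context Save Area) buffer object in VRAM via
 * amdgpu_bo_create_kernel() and zeroes its contents.
 *
 * Return: Zero on success, otherwise an error code.
 */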
int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
	int r;
	void *ptr;

	r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
				    &adev->virt.csa_vmid0_addr, &ptr);
	if (r)
		return r;

	memset(ptr, 0, AMDGPU_CSA_SIZE);
	return 0;
}

/*
 * amdgpu_map_static_csa() should be called during amdgpu_vm_init().
 * It maps the virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE"
 * into this VM, and every GFX command submission should use this virtual
 * address within its META_DATA init package to support SRIOV GFX preemption.
 */
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	int r;
	struct amdgpu_bo_va *bo_va;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);
	csa_tv.bo = &adev->virt.csa_obj->tbo;
	csa_tv.shared = true;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve CSA, PD BOs: err=%d\n", r);
		return r;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
	if (!bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for static CSA\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_alloc_pts(adev, bo_va->vm, AMDGPU_CSA_VADDR,
				AMDGPU_CSA_SIZE);
	if (r) {
		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	vm->csa_bo_va = bo_va;
	ttm_eu_backoff_reservation(&ticket, &list);
	return 0;
}
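
/*
 * A minimal usage sketch, not part of this file: under SRIOV, VM creation
 * is expected to map the static CSA right after the page-directory setup.
 * The function name below is hypothetical; error handling is illustrative.
 */
#if 0
static int example_vm_init_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm)
{
	int r = 0;

	if (amdgpu_sriov_vf(adev)) {
		/* reserve + map the shared CSA BO into this VM's GPUVM */
		r = amdgpu_map_static_csa(adev, vm);
		if (r)
			DRM_ERROR("failed to map static CSA: err=%d\n", r);
	}
	return r;
}
#endif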
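/**
 * amdgpu_virt_init_setting() - set up virtualization defaults
 * @adev:	amdgpu device.
 *
 * Enables the virtual display with a single CRTC and initializes the
 * locks used for KIQ access and reset handling.
 */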
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	/* enable virtual display */
	adev->mode_info.num_crtc = 1;
	adev->enable_virtual_display = true;

	mutex_init(&adev->virt.lock_kiq);
	mutex_init(&adev->virt.lock_reset);
}
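/**
 * amdgpu_virt_kiq_rreg() - read a register through the KIQ ring
 * @adev:	amdgpu device.
 * @reg:	register offset to read.
 *
 * Emits a register-read packet on the KIQ ring, waits for the fence and
 * returns the value the CP wrote back into the writeback page.
 *
 * Return: The register value.
 */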
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r;
	uint32_t val;
	struct dma_fence *f;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_rreg);

	mutex_lock(&adev->virt.lock_kiq);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_hdp_flush(ring);
	amdgpu_ring_emit_rreg(ring, reg);
	amdgpu_ring_emit_hdp_invalidate(ring);
	amdgpu_fence_emit(ring, &f);
	amdgpu_ring_commit(ring);
	mutex_unlock(&adev->virt.lock_kiq);

	r = dma_fence_wait(f, false);
	if (r)
		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
	dma_fence_put(f);

	val = adev->wb.wb[adev->virt.reg_val_offs];

	return val;
}
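/**
 * amdgpu_virt_kiq_wreg() - write a register through the KIQ ring
 * @adev:	amdgpu device.
 * @reg:	register offset to write.
 * @v:		value to write.
 *
 * Emits a register-write packet on the KIQ ring and waits for the fence
 * to make sure the write has landed.
 */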
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r;
	struct dma_fence *f;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	mutex_lock(&adev->virt.lock_kiq);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_hdp_flush(ring);
	amdgpu_ring_emit_wreg(ring, reg, v);
	amdgpu_ring_emit_hdp_invalidate(ring);
	amdgpu_fence_emit(ring, &f);
	amdgpu_ring_commit(ring);
	mutex_unlock(&adev->virt.lock_kiq);

	r = dma_fence_wait(f, false);
	if (r)
		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
	dma_fence_put(f);
}
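
/*
 * A minimal usage sketch, not part of this file: when the VF runs in
 * runtime mode (no exclusive MMIO access), register access is expected
 * to be tunneled through the KIQ helpers above. The function name below
 * is hypothetical.
 */
#if 0
static uint32_t example_mm_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	if (amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_rreg(adev, reg);

	return readl(((void __iomem *)adev->rmmio) + (reg * 4));
}
#endif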

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 *
 * When the driver starts to init or fini, it first needs to request full
 * GPU access.
 *
 * Return: Zero if the request succeeds, otherwise an error code.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 *
 * When the driver finishes init or fini, it needs to release full GPU
 * access again.
 *
 * Return: Zero if the release succeeds, otherwise an error code.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}
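
/*
 * A minimal usage sketch, not part of this file: driver init under SRIOV
 * brackets hardware bring-up with a request/release pair. The function
 * name below is hypothetical; error handling is illustrative.
 */
#if 0
static int example_device_init(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return r;
	}

	/* ... bring up IP blocks while full access is held ... */

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, true);

	return 0;
}
#endif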

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev:	amdgpu device.
 *
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 *
 * Return: Zero if the reset succeeds, otherwise an error code.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}
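
/*
 * A minimal usage sketch, not part of this file: a recovery path on a VF
 * would ask the hypervisor for a reset instead of performing a full ASIC
 * reset itself. The function name below is hypothetical.
 */
#if 0
static int example_gpu_recover(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return amdgpu_virt_reset_gpu(adev);

	return amdgpu_asic_reset(adev);
}
#endif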