/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/module.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"

bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/* At this point all MMIO pages except the mailbox are blocked
	 * if blocking is enabled in the hypervisor. Use SCRATCH_REG0
	 * (0xc040) to test: a blocked register reads back as 0xffffffff.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}
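
/* Usage sketch (hedged): early device init can use this check to fail probe
 * gracefully when the host has fenced off MMIO, e.g.:
 *
 *	if (amdgpu_virt_mmio_blocked(adev) && !amdgpu_sriov_runtime(adev))
 *		return -EAGAIN;
 *
 * The caller and error code shown are illustrative.
 */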

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	/* enable virtual display */
	if (adev->mode_info.num_crtc == 0)
		adev->mode_info.num_crtc = 1;
	adev->enable_virtual_display = true;
	adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC;
	adev->cg_flags = 0;
	adev->pg_flags = 0;
}

void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
					    ref, mask);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait anymore for IRQ context */
	if (r < 1 && in_interrupt())
		goto failed_kiq;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq;

	return;

failed_kiq:
	pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
}
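
/* Usage sketch (hedged): under SR-IOV, GMC code uses this helper to issue a
 * VM invalidation request and poll for its ACK through the KIQ in a single
 * ring submission, e.g. (register names illustrative):
 *
 *	amdgpu_virt_kiq_reg_write_reg_wait(adev, req_reg, ack_reg,
 *					   inv_req, 1 << vmid);
 *
 * reg0 is written with ref, then reg1 is polled until the masked value
 * matches the reference.
 */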

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev:	amdgpu device.
 * @init:	true when called during driver init.
 *
 * When the driver starts init/fini, it first needs to request full GPU
 * access.
 * Return: Zero if the request succeeds, otherwise an error code.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}
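
/* Usage sketch (hedged): init/fini paths bracket exclusive hardware access
 * with the request/release pair, passing the same phase flag to both:
 *
 *	r = amdgpu_virt_request_full_gpu(adev, true);
 *	if (r)
 *		return r;
 *	... program the hardware ...
 *	amdgpu_virt_release_full_gpu(adev, true);
 *
 * Caller and error handling shown are illustrative.
 */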

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev:	amdgpu device.
 * @init:	true when called during driver init.
 *
 * When driver init/fini finishes, full GPU access needs to be released.
 * Return: Zero if the release succeeds, otherwise an error code.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev:	amdgpu device.
 *
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 * Return: Zero if the reset succeeds, otherwise an error code.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (virt->ops && virt->ops->req_init_data)
		virt->ops->req_init_data(adev);

	if (adev->virt.req_init_data_ver > 0)
		DRM_INFO("host supports REQ_INIT_DATA handshake\n");
	else
		DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
}

/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev:	amdgpu device.
 *
 * Wait for the GPU reset to complete.
 * Return: Zero if the reset succeeds, otherwise an error code.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev:	amdgpu device.
 *
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}
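
/* Usage sketch (hedged): SR-IOV multimedia IP init (e.g. UVD/VCE sw_init)
 * typically allocates the table before ring setup and hands
 * adev->virt.mm_table.gpu_addr to firmware:
 *
 *	r = amdgpu_virt_alloc_mm_table(adev);
 *	if (r)
 *		return r;
 *
 * The helper is a no-op on bare metal and when the table already exists.
 */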

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev:	amdgpu device.
 *
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}

int amdgpu_virt_fw_reserve_get_checksum(void *obj,
					unsigned long obj_size,
					unsigned int key,
					unsigned int chksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (unsigned char *)obj;
	/* sum all bytes of the object, seeded with the key */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* subtract the bytes of the checksum itself, since it is part of
	 * the object but must not contribute to the sum
	 */
	pos = (unsigned char *)&chksum;
	for (i = 0; i < sizeof(chksum); ++i)
		ret -= *(pos + i);
	return ret;
}
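
/* Worked example (hedged, illustrative numbers): for obj = {0x01, 0x02,
 * 0x03, 0x04}, key = 0x10 and chksum = 0x0A, the function returns
 *
 *	0x10 + (0x01 + 0x02 + 0x03 + 0x04) - (0x0A + 0x00 + 0x00 + 0x00)
 *	     = 0x10
 *
 * During validation the stored checksum is passed in as @chksum, so its own
 * bytes drop out of the byte sum; the result matches the stored value only
 * if the rest of the message is intact.
 */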

void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	uint32_t pf2vf_size = 0;
	uint32_t checksum = 0;
	uint32_t checkval;
	char *str;

	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;

	if (adev->fw_vram_usage.va != NULL) {
		adev->virt.fw_reserve.p_pf2vf =
			(struct amd_sriov_msg_pf2vf_info_header *)(
			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);

		/* the pf2vf message must fit within 4KB */
		if (pf2vf_size > 0 && pf2vf_size < 4096) {
			checkval = amdgpu_virt_fw_reserve_get_checksum(
				adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
				adev->virt.fw_reserve.checksum_key, checksum);
			if (checkval == checksum) {
				adev->virt.fw_reserve.p_vf2pf =
					((void *)adev->virt.fw_reserve.p_pf2vf +
					pf2vf_size);
				memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
					AMDGPU_FW_VRAM_VF2PF_VER);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
					&str);
#ifdef MODULE
				if (THIS_MODULE->version != NULL)
					strcpy(str, THIS_MODULE->version);
				else
#endif
					strcpy(str, "N/A");
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
					0);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
					amdgpu_virt_fw_reserve_get_checksum(
					adev->virt.fw_reserve.p_vf2pf,
					pf2vf_size,
					adev->virt.fw_reserve.checksum_key, 0));
			}
		}
	}
}

void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg;

	switch (adev->asic_type) {
	case CHIP_TONGA:
	case CHIP_FIJI:
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_ARCTURUS:
		reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
		break;
	default: /* other chips don't support SR-IOV */
		reg = 0;
		break;
	}

	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (!reg) {
		if (is_virtual_machine())	/* passthrough mode excludes SR-IOV mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}
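
/* Note (hedged reading of the identifier register used above): bit 0 set
 * means this function is a VF, bit 31 set means IOV is enabled on the host.
 * Later code can branch on the cached caps, e.g.:
 *
 *	if (adev->virt.caps & AMDGPU_SRIOV_CAPS_IS_VF)
 *		... take the SR-IOV path ...
 *
 * A zero reading inside a hypervisor is treated as passthrough mode.
 */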

bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
{
	return amdgpu_sriov_is_debug(adev);
}

bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
{
	return amdgpu_sriov_is_normal(adev);
}

int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) ||
	    amdgpu_virt_access_debugfs_is_kiq(adev))
		return 0;

	if (amdgpu_virt_access_debugfs_is_mmio(adev))
		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	else
		return -EPERM;

	return 0;
}

void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
}