/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include "amdgpu.h"
#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
#include "amdgpu_ras.h"
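/**
 * amdgpu_unregister_gpu_instance - remove a GPU from the mgpu list
 *
 * @adev: amdgpu device pointer
 *
 * Removes the device from the global mgpu_info table and updates
 * the APU/dGPU counts to match.
 */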
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
	int i;

	mutex_lock(&mgpu_info.mutex);

	for (i = 0; i < mgpu_info.num_gpu; i++) {
		gpu_instance = &(mgpu_info.gpu_ins[i]);
		if (gpu_instance->adev == adev) {
			mgpu_info.gpu_ins[i] =
				mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
			mgpu_info.num_gpu--;
			if (adev->flags & AMD_IS_APU)
				mgpu_info.num_apu--;
			else
				mgpu_info.num_dgpu--;
			break;
		}
	}

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 * It calls amdgpu_device_fini() to tear down the device.
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL)
		return;

	amdgpu_unregister_gpu_instance(adev);

	if (adev->rmmio == NULL)
		goto done_free;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	if (amdgpu_device_is_px(dev)) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	amdgpu_acpi_fini(adev);

	amdgpu_device_fini(adev);

done_free:
	kfree(adev);
	dev->dev_private = NULL;
}

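/**
 * amdgpu_register_gpu_instance - add a GPU to the mgpu list
 *
 * @adev: amdgpu device pointer
 *
 * Records the device in the global mgpu_info table, up to
 * MAX_GPU_INSTANCE entries, and updates the APU/dGPU counts.
 */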
void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;

	mutex_lock(&mgpu_info.mutex);

	if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
		DRM_ERROR("Cannot register more GPU instances\n");
		mutex_unlock(&mgpu_info.mutex);
		return;
	}

	gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
	gpu_instance->adev = adev;
	gpu_instance->mgpu_fan_enabled = 0;

	mgpu_info.num_gpu++;
	if (adev->flags & AMD_IS_APU)
		mgpu_info.num_apu++;
	else
		mgpu_info.num_dgpu++;

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r, acpi_status;

	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
	if (adev == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)adev;

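	/* Enable runtime PM only for PX (PowerXpress) setups: a hybrid
	 * or power-controllable ATPX dGPU that is not an APU and is not
	 * attached over Thunderbolt.
	 */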
	if ((amdgpu_runtime_pm != 0) &&
	    amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((flags & AMD_IS_APU) == 0) &&
	    !pci_is_thunderbolt_attached(dev->pdev))
		flags |= AMD_IS_PX;

	/* amdgpu_device_init should report only fatal errors, like
	 * memory allocation, iomapping or memory manager
	 * initialization failures; it must properly initialize the
	 * GPU MC controller and permit VRAM allocation.
	 */
	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Call the ACPI methods: they require modeset init,
	 * but failure here is not fatal.
	 */
	acpi_status = amdgpu_acpi_init(adev);
	if (acpi_status)
		dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");

	if (amdgpu_device_is_px(dev)) {
		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r) {
		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
		if (adev->rmmio && amdgpu_device_is_px(dev))
			pm_runtime_put_noidle(dev->dev);
		amdgpu_driver_unload_kms(dev);
	}

	return r;
}

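/**
 * amdgpu_firmware_info - report firmware version and feature level
 *
 * @fw_info: filled in with the version and feature level of the firmware
 * @query_fw: type and index of the firmware to query
 * @adev: amdgpu device pointer
 *
 * Returns 0 on success, -EINVAL for an unknown firmware type or index.
 */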
static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
				struct drm_amdgpu_query_fw *query_fw,
				struct amdgpu_device *adev)
{
	switch (query_fw->fw_type) {
	case AMDGPU_INFO_FW_VCE:
		fw_info->ver = adev->vce.fw_version;
		fw_info->feature = adev->vce.fb_version;
		break;
	case AMDGPU_INFO_FW_UVD:
		fw_info->ver = adev->uvd.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_VCN:
		fw_info->ver = adev->vcn.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GMC:
		fw_info->ver = adev->gmc.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GFX_ME:
		fw_info->ver = adev->gfx.me_fw_version;
		fw_info->feature = adev->gfx.me_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_PFP:
		fw_info->ver = adev->gfx.pfp_fw_version;
		fw_info->feature = adev->gfx.pfp_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_CE:
		fw_info->ver = adev->gfx.ce_fw_version;
		fw_info->feature = adev->gfx.ce_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC:
		fw_info->ver = adev->gfx.rlc_fw_version;
		fw_info->feature = adev->gfx.rlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
		fw_info->ver = adev->gfx.rlc_srls_fw_version;
		fw_info->feature = adev->gfx.rlc_srls_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_MEC:
		if (query_fw->index == 0) {
			fw_info->ver = adev->gfx.mec_fw_version;
			fw_info->feature = adev->gfx.mec_feature_version;
		} else if (query_fw->index == 1) {
			fw_info->ver = adev->gfx.mec2_fw_version;
			fw_info->feature = adev->gfx.mec2_feature_version;
		} else
			return -EINVAL;
		break;
	case AMDGPU_INFO_FW_SMC:
		fw_info->ver = adev->pm.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_TA:
		if (query_fw->index > 1)
			return -EINVAL;
		if (query_fw->index == 0) {
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_xgmi_ucode_version;
		} else {
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_ras_ucode_version;
		}
		break;
	case AMDGPU_INFO_FW_SDMA:
		if (query_fw->index >= adev->sdma.num_instances)
			return -EINVAL;
		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
		break;
	case AMDGPU_INFO_FW_SOS:
		fw_info->ver = adev->psp.sos_fw_version;
		fw_info->feature = adev->psp.sos_feature_version;
		break;
	case AMDGPU_INFO_FW_ASD:
		fw_info->ver = adev->psp.asd_fw_version;
		fw_info->feature = adev->psp.asd_feature_version;
		break;
	case AMDGPU_INFO_FW_DMCU:
		fw_info->ver = adev->dm.dmcu_fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_DMCUB:
		fw_info->ver = adev->dm.dmcub_fw_version;
		fw_info->feature = 0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

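/**
 * amdgpu_hw_ip_info - report information about a hw IP block
 *
 * @adev: amdgpu device pointer
 * @info: userspace query from the INFO ioctl
 * @result: filled in with ring count, IB alignment and IP version
 *
 * Counts the rings of the requested IP type that are actually ready
 * and reports them as a bitmask in @result->available_rings.
 * Returns 0 on success (including when no such IP block is present),
 * -EINVAL on an invalid type or instance.
 */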
static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
			     struct drm_amdgpu_info *info,
			     struct drm_amdgpu_info_hw_ip *result)
{
	uint32_t ib_start_alignment = 0;
	uint32_t ib_size_alignment = 0;
	enum amd_ip_block_type type;
	unsigned int num_rings = 0;
	unsigned int i, j;

	if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
		return -EINVAL;

	switch (info->query_hw_ip.type) {
	case AMDGPU_HW_IP_GFX:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			if (adev->gfx.gfx_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_COMPUTE:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			if (adev->gfx.compute_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_DMA:
		type = AMD_IP_BLOCK_TYPE_SDMA;
		for (i = 0; i < adev->sdma.num_instances; i++)
			if (adev->sdma.instance[i].ring.sched.ready)
				++num_rings;
		ib_start_alignment = 256;
		ib_size_alignment = 4;
		break;
	case AMDGPU_HW_IP_UVD:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			if (adev->uvd.inst[i].ring.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCE:
		type = AMD_IP_BLOCK_TYPE_VCE;
		for (i = 0; i < adev->vce.num_rings; i++)
			if (adev->vce.ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 4;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->uvd.num_enc_rings; j++)
				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			if (adev->vcn.inst[i].ring_dec.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->vcn.num_enc_rings; j++)
				if (adev->vcn.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
			AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;

		for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
			if (adev->jpeg.harvest_config & (1 << i))
				continue;

			if (adev->jpeg.inst[i].ring_dec.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	default:
		return -EINVAL;
	}

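	/* find the first valid instance of the requested IP block type */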
	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type &&
		    adev->ip_blocks[i].status.valid)
			break;

	if (i == adev->num_ip_blocks)
		return 0;

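	/* cap the reported rings at the number of scheduler entities a
	 * context exposes for this IP type
	 */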
	num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
			num_rings);

	result->hw_ip_version_major = adev->ip_blocks[i].version->major;
	result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;
	result->capabilities_flags = 0;
	result->available_rings = (1 << num_rings) - 1;
	result->ib_start_alignment = ib_start_alignment;
	result->ib_size_alignment = ib_size_alignment;
	return 0;
}

/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm dev pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers.  Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, found;
	int ui32_size = sizeof(ui32);

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		int ret;

		ret = amdgpu_hw_ip_info(adev, info, &ip);
		if (ret)
			return ret;

		ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
		return ret ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amd_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
		case AMDGPU_HW_IP_VCN_ENC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
				AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;
		int ret;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
		if (ret)
			return ret;

		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_EVICTIONS:
		ui64 = atomic64_read(&adev->num_evictions);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.compute_partition_size = adev->gds.gds_size;
		gds_info.gds_total_size = adev->gds.gds_size;
		gds_info.gws_per_compute_partition = adev->gds.gws_size;
		gds_info.oa_per_compute_partition = adev->gds.oa_size;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		vram_gtt.vram_cpu_accessible_size =
			min(adev->gmc.visible_vram_size -
			    atomic64_read(&adev->visible_pin_size),
			    vram_gtt.vram_size);
		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
		vram_gtt.gtt_size *= PAGE_SIZE;
		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_MEMORY: {
		struct drm_amdgpu_memory_info mem;

		memset(&mem, 0, sizeof(mem));
		mem.vram.total_heap_size = adev->gmc.real_vram_size;
		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		mem.vram.heap_usage =
			amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

		mem.cpu_accessible_vram.total_heap_size =
			adev->gmc.visible_vram_size;
		mem.cpu_accessible_vram.usable_heap_size =
			min(adev->gmc.visible_vram_size -
			    atomic64_read(&adev->visible_pin_size),
			    mem.vram.usable_heap_size);
		mem.cpu_accessible_vram.heap_usage =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.cpu_accessible_vram.max_allocation =
			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

		mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
		mem.gtt.total_heap_size *= PAGE_SIZE;
		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
			atomic64_read(&adev->gart_pin_size);
		mem.gtt.heap_usage =
			amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

		return copy_to_user(out, &mem,
				    min((size_t)size, sizeof(mem)))
				    ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size;
		uint32_t *regs;
		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned sh_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SH_INDEX_MASK;

		/* set full masks if userspace set all bits
		 * in the bitfields
		 */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
			se_num = 0xffffffff;
		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
			sh_num = 0xffffffff;

		if (info->read_mmr_reg.count > 128)
			return -EINVAL;

		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
		if (!regs)
			return -ENOMEM;
		alloc_size = info->read_mmr_reg.count * sizeof(*regs);

		for (i = 0; i < info->read_mmr_reg.count; i++)
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("disallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				return -EFAULT;
			}
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		return n ? -EFAULT : 0;
	}
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device dev_info = {};
		uint64_t vm_size;

		dev_info.device_id = dev->pdev->device;
		dev_info.chip_rev = adev->rev_id;
		dev_info.external_rev = adev->external_rev_id;
		dev_info.pci_rev = dev->pdev->revision;
		dev_info.family = adev->family;
		dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in KHz */
		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled) {
			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
		} else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
			   adev->virt.ops->get_pp_clk) {
			dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
			dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
		} else {
			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
			dev_info.max_memory_clock = adev->clock.default_mclk * 10;
		}
		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
		dev_info._pad = 0;
		dev_info.ids_flags = 0;
		if (adev->flags & AMD_IS_APU)
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;

		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		vm_size -= AMDGPU_VA_RESERVED_SIZE;

		/* Older VCE FW versions are buggy and can handle only 40bits */
		if (adev->vce.fw_version &&
		    adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
			vm_size = min(vm_size, 1ULL << 40);

		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info.virtual_address_max =
			min(vm_size, AMDGPU_GMC_HOLE_START);

		if (vm_size > AMDGPU_GMC_HOLE_START) {
			dev_info.high_va_offset = AMDGPU_GMC_HOLE_END;
			dev_info.high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
		}
		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
		dev_info.cu_active_number = adev->gfx.cu_info.number;
		dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
		memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
		       sizeof(adev->gfx.cu_info.bitmap));
		dev_info.vram_type = adev->gmc.vram_type;
		dev_info.vram_bit_width = adev->gmc.vram_width;
		dev_info.vce_harvest_config = adev->vce.harvest_config;
		dev_info.gc_double_offchip_lds_buf =
			adev->gfx.config.double_offchip_lds_buf;
		dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
		dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
		dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
		dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
		dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
		dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
		dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

		if (adev->family >= AMDGPU_FAMILY_NV)
			dev_info.pa_sc_tile_steering_override =
				adev->gfx.config.pa_sc_tile_steering_override;

		dev_info.tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;

		return copy_to_user(out, &dev_info,
				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
		unsigned i;
		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
		struct amd_vce_state *vce_state;

		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
			if (vce_state) {
				vce_clk_table.entries[i].sclk = vce_state->sclk;
				vce_clk_table.entries[i].mclk = vce_state->mclk;
				vce_clk_table.entries[i].eclk = vce_state->evclk;
				vce_clk_table.num_valid_entries++;
			}
		}

		return copy_to_user(out, &vce_clk_table,
				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VBIOS: {
		uint32_t bios_size = adev->bios_size;

		switch (info->vbios_info.type) {
		case AMDGPU_INFO_VBIOS_SIZE:
			return copy_to_user(out, &bios_size,
					min((size_t)size, sizeof(bios_size)))
					? -EFAULT : 0;
		case AMDGPU_INFO_VBIOS_IMAGE: {
			uint8_t *bios;
			uint32_t bios_offset = info->vbios_info.offset;

			if (bios_offset >= bios_size)
				return -EINVAL;

			bios = adev->bios + bios_offset;
			return copy_to_user(out, bios,
					    min((size_t)size, (size_t)(bios_size - bios_offset)))
					? -EFAULT : 0;
		}
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
					info->vbios_info.type);
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_NUM_HANDLES: {
		struct drm_amdgpu_info_num_handles handle;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_UVD:
			/* Starting with Polaris, we support unlimited UVD handles */
			if (adev->asic_type < CHIP_POLARIS10) {
				handle.uvd_max_handles = adev->uvd.max_handles;
				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

				return copy_to_user(out, &handle,
					min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
			} else {
				return -ENODATA;
			}

			break;
		default:
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_SENSOR: {
		if (!adev->pm.dpm_enabled)
			return -ENOENT;

		switch (info->sensor_info.type) {
		case AMDGPU_INFO_SENSOR_GFX_SCLK:
			/* get sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GFX_MCLK:
			/* get mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GPU_TEMP:
			/* get temperature in millidegrees C */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_TEMP,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_LOAD:
			/* get GPU load */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_LOAD,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
			/* get average GPU power */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_POWER,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 >>= 8;
			break;
		case AMDGPU_INFO_SENSOR_VDDNB:
			/* get VDDNB in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDNB,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_VDDGFX:
			/* get VDDGFX in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDGFX,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
			/* get stable pstate sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
			/* get stable pstate mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->sensor_info.type);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_LOST_COUNTER:
		ui32 = atomic_read(&adev->vram_lost_counter);
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
		uint64_t ras_mask;

		if (!ras)
			return -EINVAL;
		ras_mask = (uint64_t)ras->supported << 32 | ras->features;

		return copy_to_user(out, &ras_mask,
				min_t(u64, size, sizeof(ras_mask))) ?
			-EFAULT : 0;
	}
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv;
	int r, pasid;

	/* Ensure IB tests are run on ring */
	flush_delayed_work(&adev->delayed_init_work);

	if (amdgpu_ras_intr_triggered()) {
		DRM_ERROR("RAS Intr triggered, device disabled!!");
		return -EHWPOISON;
	}

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

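	/* a PASID of 0 is valid here: allocation failure is not fatal,
	 * the VM simply runs without a PASID
	 */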
	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}
	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
	if (r)
		goto error_pasid;

	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
	if (!fpriv->prt_va) {
		r = -ENOMEM;
		goto error_vm;
	}

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;

		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
						&fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
		if (r)
			goto error_vm;
	}

	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);

	file_priv->driver_priv = fpriv;
	goto out_suspend;

error_vm:
	amdgpu_vm_fini(adev, &fpriv->vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	kfree(fpriv);

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}

/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	unsigned int pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
		amdgpu_uvd_free_handles(adev, file_priv);
	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
		amdgpu_vce_free_handles(adev, file_priv);

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

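	/* the PASID may still be referenced by jobs running on this VM;
	 * defer freeing it until all fences on the root page directory
	 * have signaled
	 */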
	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
	amdgpu_vm_fini(adev, &fpriv->vm);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_put(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @pipe: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int vpos, hpos, stat;
	u32 count;

	if (pipe >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (adev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = amdgpu_display_vblank_get_counter(adev, pipe);
			/* Ask amdgpu_display_get_crtc_scanoutpos to return
			 * vpos as distance to start of vblank, instead of
			 * regular vertical scanout pos.
			 */
			stat = amdgpu_display_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&adev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}

/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}

const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	/* KMS */
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

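/**
 * amdgpu_debugfs_firmware_info - dump firmware versions to debugfs
 *
 * @m: debugfs seq_file
 * @data: unused
 *
 * Prints the version and feature level of every firmware the driver
 * has loaded, plus the VBIOS version string.
 */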
static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw;
	struct atom_context *ctx = adev->mode_info.atom_context;
	int ret, i;

	/* VCE */
	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* UVD */
	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* GMC */
	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* ME */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PFP */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CE */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST CNTL */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST GPM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST SRM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC2 */
	if (adev->asic_type == CHIP_KAVERI ||
	    (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
		query_fw.index = 1;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	/* PSP SOS */
	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP ASD */
	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_TA;
	for (i = 0; i < 2; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			continue;
		seq_printf(m, "TA %s feature version: %u, firmware version: 0x%08x\n",
				i ? "RAS" : "XGMI", fw_info.feature, fw_info.ver);
	}

	/* SMC */
	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SDMA */
	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
			   i, fw_info.feature, fw_info.ver);
	}

	/* VCN */
	query_fw.fw_type = AMDGPU_INFO_FW_VCN;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCU */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCUB */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCUB;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCUB feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);

	return 0;
}

static const struct drm_info_list amdgpu_firmware_info_list[] = {
	{"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
};
#endif

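/**
 * amdgpu_debugfs_firmware_init - register the firmware info debugfs file
 *
 * @adev: amdgpu device pointer
 *
 * Returns 0 on success or when debugfs is not enabled, error on failure.
 */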
int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
					ARRAY_SIZE(amdgpu_firmware_info_list));
#else
	return 0;
#endif
}