1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 
29 #include "amdgpu.h"
30 #include <drm/drm_debugfs.h>
31 #include <drm/amdgpu_drm.h>
32 #include "amdgpu_sched.h"
33 #include "amdgpu_uvd.h"
34 #include "amdgpu_vce.h"
35 #include "atom.h"
36 
37 #include <linux/vga_switcheroo.h>
38 #include <linux/slab.h>
39 #include <linux/uaccess.h>
40 #include <linux/pci.h>
41 #include <linux/pm_runtime.h>
42 #include "amdgpu_amdkfd.h"
43 #include "amdgpu_gem.h"
44 #include "amdgpu_display.h"
45 #include "amdgpu_ras.h"
46 
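/**
 * amdgpu_unregister_gpu_instance - unregister a GPU from the mgpu list
 *
 * @adev: amdgpu_device pointer
 *
 * Remove the device from the global mgpu_info list and update the
 * APU/dGPU counts accordingly (all asics).
 */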
47 void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
48 {
49 	struct amdgpu_gpu_instance *gpu_instance;
50 	int i;
51 
52 	mutex_lock(&mgpu_info.mutex);
53 
54 	for (i = 0; i < mgpu_info.num_gpu; i++) {
55 		gpu_instance = &(mgpu_info.gpu_ins[i]);
56 		if (gpu_instance->adev == adev) {
57 			mgpu_info.gpu_ins[i] =
58 				mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
59 			mgpu_info.num_gpu--;
60 			if (adev->flags & AMD_IS_APU)
61 				mgpu_info.num_apu--;
62 			else
63 				mgpu_info.num_dgpu--;
64 			break;
65 		}
66 	}
67 
68 	mutex_unlock(&mgpu_info.mutex);
69 }
70 
71 /**
72  * amdgpu_driver_unload_kms - Main unload function for KMS.
73  *
74  * @dev: drm dev pointer
75  *
76  * This is the main unload function for KMS (all asics).
78  */
79 void amdgpu_driver_unload_kms(struct drm_device *dev)
80 {
81 	struct amdgpu_device *adev = dev->dev_private;
82 
83 	if (adev == NULL)
84 		return;
85 
86 	amdgpu_unregister_gpu_instance(adev);
87 
88 	if (adev->rmmio == NULL)
89 		goto done_free;
90 
91 	if (amdgpu_sriov_vf(adev))
92 		amdgpu_virt_request_full_gpu(adev, false);
93 
94 	if (amdgpu_device_is_px(dev)) {
95 		pm_runtime_get_sync(dev->dev);
96 		pm_runtime_forbid(dev->dev);
97 	}
98 
99 	amdgpu_acpi_fini(adev);
100 
101 	amdgpu_device_fini(adev);
102 
103 done_free:
104 	kfree(adev);
105 	dev->dev_private = NULL;
106 }
107 
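/**
 * amdgpu_register_gpu_instance - register a GPU in the mgpu list
 *
 * @adev: amdgpu_device pointer
 *
 * Add the device to the global mgpu_info list (up to MAX_GPU_INSTANCE
 * entries) and update the APU/dGPU counts accordingly (all asics).
 */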
108 void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
109 {
110 	struct amdgpu_gpu_instance *gpu_instance;
111 
112 	mutex_lock(&mgpu_info.mutex);
113 
114 	if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
115 		DRM_ERROR("Cannot register more GPU instances\n");
116 		mutex_unlock(&mgpu_info.mutex);
117 		return;
118 	}
119 
120 	gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
121 	gpu_instance->adev = adev;
122 	gpu_instance->mgpu_fan_enabled = 0;
123 
124 	mgpu_info.num_gpu++;
125 	if (adev->flags & AMD_IS_APU)
126 		mgpu_info.num_apu++;
127 	else
128 		mgpu_info.num_dgpu++;
129 
130 	mutex_unlock(&mgpu_info.mutex);
131 }
132 
133 /**
134  * amdgpu_driver_load_kms - Main load function for KMS.
135  *
136  * @dev: drm dev pointer
137  * @flags: device flags
138  *
139  * This is the main load function for KMS (all asics).
140  * Returns 0 on success, error on failure.
141  */
142 int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
143 {
144 	struct amdgpu_device *adev;
145 	int r, acpi_status;
146 
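	/* SI and CIK parts are handled by radeon by default; amdgpu only
	 * binds to them when support is explicitly enabled with the
	 * amdgpu.si_support/amdgpu.cik_support module parameters.
	 */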
147 #ifdef CONFIG_DRM_AMDGPU_SI
148 	if (!amdgpu_si_support) {
149 		switch (flags & AMD_ASIC_MASK) {
150 		case CHIP_TAHITI:
151 		case CHIP_PITCAIRN:
152 		case CHIP_VERDE:
153 		case CHIP_OLAND:
154 		case CHIP_HAINAN:
155 			dev_info(dev->dev,
156 				 "SI support provided by radeon.\n");
157 			dev_info(dev->dev,
158 				 "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
159 				);
160 			return -ENODEV;
161 		}
162 	}
163 #endif
164 #ifdef CONFIG_DRM_AMDGPU_CIK
165 	if (!amdgpu_cik_support) {
166 		switch (flags & AMD_ASIC_MASK) {
167 		case CHIP_KAVERI:
168 		case CHIP_BONAIRE:
169 		case CHIP_HAWAII:
170 		case CHIP_KABINI:
171 		case CHIP_MULLINS:
172 			dev_info(dev->dev,
173 				 "CIK support provided by radeon.\n");
174 			dev_info(dev->dev,
175 				 "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
176 				);
177 			return -ENODEV;
178 		}
179 	}
180 #endif
181 
182 	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
183 	if (adev == NULL)
184 		return -ENOMEM;
186 	dev->dev_private = (void *)adev;
187 
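	/* Treat the device as PX (hybrid graphics) only if runtime PM is
	 * allowed, ATPX is present and this is a non-Thunderbolt dGPU;
	 * runtime PM is then configured further below.
	 */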
188 	if ((amdgpu_runtime_pm != 0) &&
189 	    amdgpu_has_atpx() &&
190 	    (amdgpu_is_atpx_hybrid() ||
191 	     amdgpu_has_atpx_dgpu_power_cntl()) &&
192 	    ((flags & AMD_IS_APU) == 0) &&
193 	    !pci_is_thunderbolt_attached(dev->pdev))
194 		flags |= AMD_IS_PX;
195 
196 	/* amdgpu_device_init() should report only fatal errors, such as
197 	 * memory allocation, iomapping or memory manager initialization
198 	 * failures. It must properly initialize the GPU MC controller and
199 	 * permit VRAM allocation.
200 	 */
202 	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
203 	if (r) {
204 		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
205 		goto out;
206 	}
207 
208 	/* Call ACPI methods: these require modeset init,
209 	 * but failure here is not fatal.
210 	 */
211 	if (!r) {
212 		acpi_status = amdgpu_acpi_init(adev);
213 		if (acpi_status)
214 			dev_dbg(&dev->pdev->dev,
215 				"Error during ACPI methods call\n");
216 	}
217 
218 	if (amdgpu_device_is_px(dev)) {
219 		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
220 		pm_runtime_use_autosuspend(dev->dev);
221 		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
222 		pm_runtime_set_active(dev->dev);
223 		pm_runtime_allow(dev->dev);
224 		pm_runtime_mark_last_busy(dev->dev);
225 		pm_runtime_put_autosuspend(dev->dev);
226 	}
227 
228 	amdgpu_register_gpu_instance(adev);
229 out:
230 	if (r) {
231 		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
232 		if (adev->rmmio && amdgpu_device_is_px(dev))
233 			pm_runtime_put_noidle(dev->dev);
234 		amdgpu_driver_unload_kms(dev);
235 	}
236 
237 	return r;
238 }
239 
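/**
 * amdgpu_firmware_info - query firmware and feature versions
 *
 * @fw_info: info structure to fill in
 * @query_fw: which firmware block (and index, where applicable) to query
 * @adev: amdgpu_device pointer
 *
 * Helper for the AMDGPU_INFO_FW_VERSION query and the firmware debugfs
 * file. Returns 0 on success, -EINVAL for an unknown firmware type or an
 * out-of-range index.
 */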
240 static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
241 				struct drm_amdgpu_query_fw *query_fw,
242 				struct amdgpu_device *adev)
243 {
244 	switch (query_fw->fw_type) {
245 	case AMDGPU_INFO_FW_VCE:
246 		fw_info->ver = adev->vce.fw_version;
247 		fw_info->feature = adev->vce.fb_version;
248 		break;
249 	case AMDGPU_INFO_FW_UVD:
250 		fw_info->ver = adev->uvd.fw_version;
251 		fw_info->feature = 0;
252 		break;
253 	case AMDGPU_INFO_FW_VCN:
254 		fw_info->ver = adev->vcn.fw_version;
255 		fw_info->feature = 0;
256 		break;
257 	case AMDGPU_INFO_FW_GMC:
258 		fw_info->ver = adev->gmc.fw_version;
259 		fw_info->feature = 0;
260 		break;
261 	case AMDGPU_INFO_FW_GFX_ME:
262 		fw_info->ver = adev->gfx.me_fw_version;
263 		fw_info->feature = adev->gfx.me_feature_version;
264 		break;
265 	case AMDGPU_INFO_FW_GFX_PFP:
266 		fw_info->ver = adev->gfx.pfp_fw_version;
267 		fw_info->feature = adev->gfx.pfp_feature_version;
268 		break;
269 	case AMDGPU_INFO_FW_GFX_CE:
270 		fw_info->ver = adev->gfx.ce_fw_version;
271 		fw_info->feature = adev->gfx.ce_feature_version;
272 		break;
273 	case AMDGPU_INFO_FW_GFX_RLC:
274 		fw_info->ver = adev->gfx.rlc_fw_version;
275 		fw_info->feature = adev->gfx.rlc_feature_version;
276 		break;
277 	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
278 		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
279 		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
280 		break;
281 	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
282 		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
283 		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
284 		break;
285 	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
286 		fw_info->ver = adev->gfx.rlc_srls_fw_version;
287 		fw_info->feature = adev->gfx.rlc_srls_feature_version;
288 		break;
289 	case AMDGPU_INFO_FW_GFX_MEC:
290 		if (query_fw->index == 0) {
291 			fw_info->ver = adev->gfx.mec_fw_version;
292 			fw_info->feature = adev->gfx.mec_feature_version;
293 		} else if (query_fw->index == 1) {
294 			fw_info->ver = adev->gfx.mec2_fw_version;
295 			fw_info->feature = adev->gfx.mec2_feature_version;
296 		} else
297 			return -EINVAL;
298 		break;
299 	case AMDGPU_INFO_FW_SMC:
300 		fw_info->ver = adev->pm.fw_version;
301 		fw_info->feature = 0;
302 		break;
303 	case AMDGPU_INFO_FW_TA:
304 		if (query_fw->index > 1)
305 			return -EINVAL;
306 		if (query_fw->index == 0) {
307 			fw_info->ver = adev->psp.ta_fw_version;
308 			fw_info->feature = adev->psp.ta_xgmi_ucode_version;
309 		} else {
310 			fw_info->ver = adev->psp.ta_fw_version;
311 			fw_info->feature = adev->psp.ta_ras_ucode_version;
312 		}
313 		break;
314 	case AMDGPU_INFO_FW_SDMA:
315 		if (query_fw->index >= adev->sdma.num_instances)
316 			return -EINVAL;
317 		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
318 		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
319 		break;
320 	case AMDGPU_INFO_FW_SOS:
321 		fw_info->ver = adev->psp.sos_fw_version;
322 		fw_info->feature = adev->psp.sos_feature_version;
323 		break;
324 	case AMDGPU_INFO_FW_ASD:
325 		fw_info->ver = adev->psp.asd_fw_version;
326 		fw_info->feature = adev->psp.asd_feature_version;
327 		break;
328 	case AMDGPU_INFO_FW_DMCU:
329 		fw_info->ver = adev->dm.dmcu_fw_version;
330 		fw_info->feature = 0;
331 		break;
332 	default:
333 		return -EINVAL;
334 	}
335 	return 0;
336 }
337 
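/**
 * amdgpu_hw_ip_info - query information about a HW IP block
 *
 * @adev: amdgpu_device pointer
 * @info: INFO ioctl request specifying the HW IP type and instance
 * @result: structure to fill with version, ring and IB alignment data
 *
 * Count the rings that are ready for the requested IP block and report
 * the block's version and IB alignment requirements. Returns 0 on success
 * (leaving @result untouched if the block is not present), -EINVAL for an
 * invalid type or instance.
 */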
338 static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
339 			     struct drm_amdgpu_info *info,
340 			     struct drm_amdgpu_info_hw_ip *result)
341 {
342 	uint32_t ib_start_alignment = 0;
343 	uint32_t ib_size_alignment = 0;
344 	enum amd_ip_block_type type;
345 	unsigned int num_rings = 0;
346 	unsigned int i, j;
347 
348 	if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
349 		return -EINVAL;
350 
351 	switch (info->query_hw_ip.type) {
352 	case AMDGPU_HW_IP_GFX:
353 		type = AMD_IP_BLOCK_TYPE_GFX;
354 		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
355 			if (adev->gfx.gfx_ring[i].sched.ready)
356 				++num_rings;
357 		ib_start_alignment = 32;
358 		ib_size_alignment = 32;
359 		break;
360 	case AMDGPU_HW_IP_COMPUTE:
361 		type = AMD_IP_BLOCK_TYPE_GFX;
362 		for (i = 0; i < adev->gfx.num_compute_rings; i++)
363 			if (adev->gfx.compute_ring[i].sched.ready)
364 				++num_rings;
365 		ib_start_alignment = 32;
366 		ib_size_alignment = 32;
367 		break;
368 	case AMDGPU_HW_IP_DMA:
369 		type = AMD_IP_BLOCK_TYPE_SDMA;
370 		for (i = 0; i < adev->sdma.num_instances; i++)
371 			if (adev->sdma.instance[i].ring.sched.ready)
372 				++num_rings;
373 		ib_start_alignment = 256;
374 		ib_size_alignment = 4;
375 		break;
376 	case AMDGPU_HW_IP_UVD:
377 		type = AMD_IP_BLOCK_TYPE_UVD;
378 		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
379 			if (adev->uvd.harvest_config & (1 << i))
380 				continue;
381 
382 			if (adev->uvd.inst[i].ring.sched.ready)
383 				++num_rings;
384 		}
385 		ib_start_alignment = 64;
386 		ib_size_alignment = 64;
387 		break;
388 	case AMDGPU_HW_IP_VCE:
389 		type = AMD_IP_BLOCK_TYPE_VCE;
390 		for (i = 0; i < adev->vce.num_rings; i++)
391 			if (adev->vce.ring[i].sched.ready)
392 				++num_rings;
393 		ib_start_alignment = 4;
394 		ib_size_alignment = 1;
395 		break;
396 	case AMDGPU_HW_IP_UVD_ENC:
397 		type = AMD_IP_BLOCK_TYPE_UVD;
398 		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
399 			if (adev->uvd.harvest_config & (1 << i))
400 				continue;
401 
402 			for (j = 0; j < adev->uvd.num_enc_rings; j++)
403 				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
404 					++num_rings;
405 		}
406 		ib_start_alignment = 64;
407 		ib_size_alignment = 64;
408 		break;
409 	case AMDGPU_HW_IP_VCN_DEC:
410 		type = AMD_IP_BLOCK_TYPE_VCN;
411 		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
412 			if (adev->uvd.harvest_config & (1 << i))
413 				continue;
414 
415 			if (adev->vcn.inst[i].ring_dec.sched.ready)
416 				++num_rings;
417 		}
418 		ib_start_alignment = 16;
419 		ib_size_alignment = 16;
420 		break;
421 	case AMDGPU_HW_IP_VCN_ENC:
422 		type = AMD_IP_BLOCK_TYPE_VCN;
423 		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
424 			if (adev->uvd.harvest_config & (1 << i))
425 				continue;
426 
427 			for (j = 0; j < adev->vcn.num_enc_rings; j++)
428 				if (adev->vcn.inst[i].ring_enc[j].sched.ready)
429 					++num_rings;
430 		}
431 		ib_start_alignment = 64;
432 		ib_size_alignment = 1;
433 		break;
434 	case AMDGPU_HW_IP_VCN_JPEG:
435 		type = AMD_IP_BLOCK_TYPE_VCN;
436 		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
437 			if (adev->uvd.harvest_config & (1 << i))
438 				continue;
439 
440 			if (adev->vcn.inst[i].ring_jpeg.sched.ready)
441 				++num_rings;
442 		}
443 		ib_start_alignment = 16;
444 		ib_size_alignment = 16;
445 		break;
446 	default:
447 		return -EINVAL;
448 	}
449 
450 	for (i = 0; i < adev->num_ip_blocks; i++)
451 		if (adev->ip_blocks[i].version->type == type &&
452 		    adev->ip_blocks[i].status.valid)
453 			break;
454 
455 	if (i == adev->num_ip_blocks)
456 		return 0;
457 
458 	num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
459 			num_rings);
460 
461 	result->hw_ip_version_major = adev->ip_blocks[i].version->major;
462 	result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;
463 	result->capabilities_flags = 0;
464 	result->available_rings = (1 << num_rings) - 1;
465 	result->ib_start_alignment = ib_start_alignment;
466 	result->ib_size_alignment = ib_size_alignment;
467 	return 0;
468 }
469 
470 /*
471  * Userspace get information ioctl
472  */
473 /**
474  * amdgpu_info_ioctl - answer a device specific request.
475  *
476  * @dev: drm device pointer
477  * @data: request object
478  * @filp: drm filp
479  *
480  * This function is used to pass device specific parameters to the userspace
481  * drivers.  Examples include: pci device id, pipeline params, tiling params,
482  * etc. (all asics).
483  * Returns 0 on success, negative error code on failure.
484  */
485 static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
486 {
487 	struct amdgpu_device *adev = dev->dev_private;
488 	struct drm_amdgpu_info *info = data;
489 	struct amdgpu_mode_info *minfo = &adev->mode_info;
490 	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
491 	uint32_t size = info->return_size;
492 	struct drm_crtc *crtc;
493 	uint32_t ui32 = 0;
494 	uint64_t ui64 = 0;
495 	int i, found;
496 	int ui32_size = sizeof(ui32);
497 
498 	if (!info->return_size || !info->return_pointer)
499 		return -EINVAL;
500 
501 	switch (info->query) {
502 	case AMDGPU_INFO_ACCEL_WORKING:
503 		ui32 = adev->accel_working;
504 		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
505 	case AMDGPU_INFO_CRTC_FROM_ID:
506 		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
507 			crtc = (struct drm_crtc *)minfo->crtcs[i];
508 			if (crtc && crtc->base.id == info->mode_crtc.id) {
509 				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
510 				ui32 = amdgpu_crtc->crtc_id;
511 				found = 1;
512 				break;
513 			}
514 		}
515 		if (!found) {
516 			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
517 			return -EINVAL;
518 		}
519 		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
520 	case AMDGPU_INFO_HW_IP_INFO: {
521 		struct drm_amdgpu_info_hw_ip ip = {};
522 		int ret;
523 
524 		ret = amdgpu_hw_ip_info(adev, info, &ip);
525 		if (ret)
526 			return ret;
527 
528 		ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
529 		return ret ? -EFAULT : 0;
530 	}
531 	case AMDGPU_INFO_HW_IP_COUNT: {
532 		enum amd_ip_block_type type;
533 		uint32_t count = 0;
534 
535 		switch (info->query_hw_ip.type) {
536 		case AMDGPU_HW_IP_GFX:
537 			type = AMD_IP_BLOCK_TYPE_GFX;
538 			break;
539 		case AMDGPU_HW_IP_COMPUTE:
540 			type = AMD_IP_BLOCK_TYPE_GFX;
541 			break;
542 		case AMDGPU_HW_IP_DMA:
543 			type = AMD_IP_BLOCK_TYPE_SDMA;
544 			break;
545 		case AMDGPU_HW_IP_UVD:
546 			type = AMD_IP_BLOCK_TYPE_UVD;
547 			break;
548 		case AMDGPU_HW_IP_VCE:
549 			type = AMD_IP_BLOCK_TYPE_VCE;
550 			break;
551 		case AMDGPU_HW_IP_UVD_ENC:
552 			type = AMD_IP_BLOCK_TYPE_UVD;
553 			break;
554 		case AMDGPU_HW_IP_VCN_DEC:
555 		case AMDGPU_HW_IP_VCN_ENC:
556 		case AMDGPU_HW_IP_VCN_JPEG:
557 			type = AMD_IP_BLOCK_TYPE_VCN;
558 			break;
559 		default:
560 			return -EINVAL;
561 		}
562 
563 		for (i = 0; i < adev->num_ip_blocks; i++)
564 			if (adev->ip_blocks[i].version->type == type &&
565 			    adev->ip_blocks[i].status.valid &&
566 			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
567 				count++;
568 
569 		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
570 	}
571 	case AMDGPU_INFO_TIMESTAMP:
572 		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
573 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
574 	case AMDGPU_INFO_FW_VERSION: {
575 		struct drm_amdgpu_info_firmware fw_info;
576 		int ret;
577 
578 		/* We only support one instance of each IP block right now. */
579 		if (info->query_fw.ip_instance != 0)
580 			return -EINVAL;
581 
582 		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
583 		if (ret)
584 			return ret;
585 
586 		return copy_to_user(out, &fw_info,
587 				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
588 	}
589 	case AMDGPU_INFO_NUM_BYTES_MOVED:
590 		ui64 = atomic64_read(&adev->num_bytes_moved);
591 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
592 	case AMDGPU_INFO_NUM_EVICTIONS:
593 		ui64 = atomic64_read(&adev->num_evictions);
594 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
595 	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
596 		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
597 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
598 	case AMDGPU_INFO_VRAM_USAGE:
599 		ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
600 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
601 	case AMDGPU_INFO_VIS_VRAM_USAGE:
602 		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
603 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
604 	case AMDGPU_INFO_GTT_USAGE:
605 		ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
606 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
607 	case AMDGPU_INFO_GDS_CONFIG: {
608 		struct drm_amdgpu_info_gds gds_info;
609 
610 		memset(&gds_info, 0, sizeof(gds_info));
611 		gds_info.compute_partition_size = adev->gds.gds_size;
612 		gds_info.gds_total_size = adev->gds.gds_size;
613 		gds_info.gws_per_compute_partition = adev->gds.gws_size;
614 		gds_info.oa_per_compute_partition = adev->gds.oa_size;
615 		return copy_to_user(out, &gds_info,
616 				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
617 	}
618 	case AMDGPU_INFO_VRAM_GTT: {
619 		struct drm_amdgpu_info_vram_gtt vram_gtt;
620 
621 		vram_gtt.vram_size = adev->gmc.real_vram_size -
622 			atomic64_read(&adev->vram_pin_size);
623 		vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
624 			atomic64_read(&adev->visible_pin_size);
625 		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
626 		vram_gtt.gtt_size *= PAGE_SIZE;
627 		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
628 		return copy_to_user(out, &vram_gtt,
629 				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
630 	}
631 	case AMDGPU_INFO_MEMORY: {
632 		struct drm_amdgpu_memory_info mem;
633 
634 		memset(&mem, 0, sizeof(mem));
635 		mem.vram.total_heap_size = adev->gmc.real_vram_size;
636 		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
637 			atomic64_read(&adev->vram_pin_size);
638 		mem.vram.heap_usage =
639 			amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
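		/* Advertise at most 3/4 of the usable heap as a single allocation. */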
640 		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
641 
642 		mem.cpu_accessible_vram.total_heap_size =
643 			adev->gmc.visible_vram_size;
644 		mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
645 			atomic64_read(&adev->visible_pin_size);
646 		mem.cpu_accessible_vram.heap_usage =
647 			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
648 		mem.cpu_accessible_vram.max_allocation =
649 			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
650 
651 		mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
652 		mem.gtt.total_heap_size *= PAGE_SIZE;
653 		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
654 			atomic64_read(&adev->gart_pin_size);
655 		mem.gtt.heap_usage =
656 			amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
657 		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
658 
659 		return copy_to_user(out, &mem,
660 				    min((size_t)size, sizeof(mem)))
661 				    ? -EFAULT : 0;
662 	}
663 	case AMDGPU_INFO_READ_MMR_REG: {
664 		unsigned n, alloc_size;
665 		uint32_t *regs;
666 		unsigned se_num = (info->read_mmr_reg.instance >>
667 				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
668 				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
669 		unsigned sh_num = (info->read_mmr_reg.instance >>
670 				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
671 				  AMDGPU_INFO_MMR_SH_INDEX_MASK;
672 
673 		/* set full masks if userspace set all bits
674 		 * in the bitfields */
675 		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
676 			se_num = 0xffffffff;
677 		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
678 			sh_num = 0xffffffff;
679 
680 		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
681 		if (!regs)
682 			return -ENOMEM;
683 		alloc_size = info->read_mmr_reg.count * sizeof(*regs);
684 
685 		for (i = 0; i < info->read_mmr_reg.count; i++)
686 			if (amdgpu_asic_read_register(adev, se_num, sh_num,
687 						      info->read_mmr_reg.dword_offset + i,
688 						      &regs[i])) {
689 				DRM_DEBUG_KMS("unallowed offset %#x\n",
690 					      info->read_mmr_reg.dword_offset + i);
691 				kfree(regs);
692 				return -EFAULT;
693 			}
694 		n = copy_to_user(out, regs, min(size, alloc_size));
695 		kfree(regs);
696 		return n ? -EFAULT : 0;
697 	}
698 	case AMDGPU_INFO_DEV_INFO: {
699 		struct drm_amdgpu_info_device dev_info = {};
700 		uint64_t vm_size;
701 
702 		dev_info.device_id = dev->pdev->device;
703 		dev_info.chip_rev = adev->rev_id;
704 		dev_info.external_rev = adev->external_rev_id;
705 		dev_info.pci_rev = dev->pdev->revision;
706 		dev_info.family = adev->family;
707 		dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
708 		dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
709 		/* return all clocks in KHz */
710 		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
711 		if (adev->pm.dpm_enabled) {
712 			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
713 			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
714 		} else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
715 			   adev->virt.ops->get_pp_clk) {
716 			dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
717 			dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
718 		} else {
719 			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
720 			dev_info.max_memory_clock = adev->clock.default_mclk * 10;
721 		}
722 		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
723 		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
724 			adev->gfx.config.max_shader_engines;
725 		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
726 		dev_info._pad = 0;
727 		dev_info.ids_flags = 0;
728 		if (adev->flags & AMD_IS_APU)
729 			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
730 		if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
731 			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
732 
733 		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
734 		vm_size -= AMDGPU_VA_RESERVED_SIZE;
735 
736 		/* Older VCE FW versions are buggy and can handle only 40 bits */
737 		if (adev->vce.fw_version &&
738 		    adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
739 			vm_size = min(vm_size, 1ULL << 40);
740 
741 		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
742 		dev_info.virtual_address_max =
743 			min(vm_size, AMDGPU_GMC_HOLE_START);
744 
745 		if (vm_size > AMDGPU_GMC_HOLE_START) {
746 			dev_info.high_va_offset = AMDGPU_GMC_HOLE_END;
747 			dev_info.high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
748 		}
749 		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
750 		dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
751 		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
752 		dev_info.cu_active_number = adev->gfx.cu_info.number;
753 		dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
754 		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
755 		memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
756 		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
757 		memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
758 		       sizeof(adev->gfx.cu_info.bitmap));
759 		dev_info.vram_type = adev->gmc.vram_type;
760 		dev_info.vram_bit_width = adev->gmc.vram_width;
761 		dev_info.vce_harvest_config = adev->vce.harvest_config;
762 		dev_info.gc_double_offchip_lds_buf =
763 			adev->gfx.config.double_offchip_lds_buf;
764 
765 		if (amdgpu_ngg) {
766 			dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
767 			dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
768 			dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
769 			dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
770 			dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
771 			dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
772 			dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
773 			dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
774 		}
775 		dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
776 		dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
777 		dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
778 		dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
779 		dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
780 		dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
781 		dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;
782 
783 		if (adev->family >= AMDGPU_FAMILY_NV)
784 			dev_info.pa_sc_tile_steering_override =
785 				adev->gfx.config.pa_sc_tile_steering_override;
786 
787 		return copy_to_user(out, &dev_info,
788 				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
789 	}
790 	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
791 		unsigned i;
792 		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
793 		struct amd_vce_state *vce_state;
794 
795 		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
796 			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
797 			if (vce_state) {
798 				vce_clk_table.entries[i].sclk = vce_state->sclk;
799 				vce_clk_table.entries[i].mclk = vce_state->mclk;
800 				vce_clk_table.entries[i].eclk = vce_state->evclk;
801 				vce_clk_table.num_valid_entries++;
802 			}
803 		}
804 
805 		return copy_to_user(out, &vce_clk_table,
806 				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
807 	}
808 	case AMDGPU_INFO_VBIOS: {
809 		uint32_t bios_size = adev->bios_size;
810 
811 		switch (info->vbios_info.type) {
812 		case AMDGPU_INFO_VBIOS_SIZE:
813 			return copy_to_user(out, &bios_size,
814 					min((size_t)size, sizeof(bios_size)))
815 					? -EFAULT : 0;
816 		case AMDGPU_INFO_VBIOS_IMAGE: {
817 			uint8_t *bios;
818 			uint32_t bios_offset = info->vbios_info.offset;
819 
820 			if (bios_offset >= bios_size)
821 				return -EINVAL;
822 
823 			bios = adev->bios + bios_offset;
824 			return copy_to_user(out, bios,
825 					    min((size_t)size, (size_t)(bios_size - bios_offset)))
826 					? -EFAULT : 0;
827 		}
828 		default:
829 			DRM_DEBUG_KMS("Invalid request %d\n",
830 					info->vbios_info.type);
831 			return -EINVAL;
832 		}
833 	}
834 	case AMDGPU_INFO_NUM_HANDLES: {
835 		struct drm_amdgpu_info_num_handles handle;
836 
837 		switch (info->query_hw_ip.type) {
838 		case AMDGPU_HW_IP_UVD:
839 			/* Starting with Polaris, we support unlimited UVD handles */
840 			if (adev->asic_type < CHIP_POLARIS10) {
841 				handle.uvd_max_handles = adev->uvd.max_handles;
842 				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);
843 
844 				return copy_to_user(out, &handle,
845 					min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
846 			} else {
847 				return -ENODATA;
848 			}
849 
850 			break;
851 		default:
852 			return -EINVAL;
853 		}
854 	}
855 	case AMDGPU_INFO_SENSOR: {
856 		if (!adev->pm.dpm_enabled)
857 			return -ENOENT;
858 
859 		switch (info->sensor_info.type) {
860 		case AMDGPU_INFO_SENSOR_GFX_SCLK:
861 			/* get sclk in MHz */
862 			if (amdgpu_dpm_read_sensor(adev,
863 						   AMDGPU_PP_SENSOR_GFX_SCLK,
864 						   (void *)&ui32, &ui32_size)) {
865 				return -EINVAL;
866 			}
867 			ui32 /= 100;
868 			break;
869 		case AMDGPU_INFO_SENSOR_GFX_MCLK:
870 			/* get mclk in MHz */
871 			if (amdgpu_dpm_read_sensor(adev,
872 						   AMDGPU_PP_SENSOR_GFX_MCLK,
873 						   (void *)&ui32, &ui32_size)) {
874 				return -EINVAL;
875 			}
876 			ui32 /= 100;
877 			break;
878 		case AMDGPU_INFO_SENSOR_GPU_TEMP:
879 			/* get temperature in millidegrees C */
880 			if (amdgpu_dpm_read_sensor(adev,
881 						   AMDGPU_PP_SENSOR_GPU_TEMP,
882 						   (void *)&ui32, &ui32_size)) {
883 				return -EINVAL;
884 			}
885 			break;
886 		case AMDGPU_INFO_SENSOR_GPU_LOAD:
887 			/* get GPU load */
888 			if (amdgpu_dpm_read_sensor(adev,
889 						   AMDGPU_PP_SENSOR_GPU_LOAD,
890 						   (void *)&ui32, &ui32_size)) {
891 				return -EINVAL;
892 			}
893 			break;
894 		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
895 			/* get average GPU power */
896 			if (amdgpu_dpm_read_sensor(adev,
897 						   AMDGPU_PP_SENSOR_GPU_POWER,
898 						   (void *)&ui32, &ui32_size)) {
899 				return -EINVAL;
900 			}
901 			ui32 >>= 8;
902 			break;
903 		case AMDGPU_INFO_SENSOR_VDDNB:
904 			/* get VDDNB in millivolts */
905 			if (amdgpu_dpm_read_sensor(adev,
906 						   AMDGPU_PP_SENSOR_VDDNB,
907 						   (void *)&ui32, &ui32_size)) {
908 				return -EINVAL;
909 			}
910 			break;
911 		case AMDGPU_INFO_SENSOR_VDDGFX:
912 			/* get VDDGFX in millivolts */
913 			if (amdgpu_dpm_read_sensor(adev,
914 						   AMDGPU_PP_SENSOR_VDDGFX,
915 						   (void *)&ui32, &ui32_size)) {
916 				return -EINVAL;
917 			}
918 			break;
919 		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
920 			/* get stable pstate sclk in MHz */
921 			if (amdgpu_dpm_read_sensor(adev,
922 						   AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
923 						   (void *)&ui32, &ui32_size)) {
924 				return -EINVAL;
925 			}
926 			ui32 /= 100;
927 			break;
928 		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
929 			/* get stable pstate mclk in MHz */
930 			if (amdgpu_dpm_read_sensor(adev,
931 						   AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
932 						   (void *)&ui32, &ui32_size)) {
933 				return -EINVAL;
934 			}
935 			ui32 /= 100;
936 			break;
937 		default:
938 			DRM_DEBUG_KMS("Invalid request %d\n",
939 				      info->sensor_info.type);
940 			return -EINVAL;
941 		}
942 		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
943 	}
944 	case AMDGPU_INFO_VRAM_LOST_COUNTER:
945 		ui32 = atomic_read(&adev->vram_lost_counter);
946 		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
947 	case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
948 		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
949 		uint64_t ras_mask;
950 
951 		if (!ras)
952 			return -EINVAL;
953 		ras_mask = (uint64_t)ras->supported << 32 | ras->features;
954 
955 		return copy_to_user(out, &ras_mask,
956 				min_t(u64, size, sizeof(ras_mask))) ?
957 			-EFAULT : 0;
958 	}
959 	default:
960 		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
961 		return -EINVAL;
962 	}
963 	return 0;
964 }
965 
967 /*
968  * Outdated mess for old drm with Xorg being in charge (void function now).
969  */
970 /**
971  * amdgpu_driver_lastclose_kms - drm callback for last close
972  *
973  * @dev: drm dev pointer
974  *
975  * Switch vga_switcheroo state after last close (all asics).
976  */
977 void amdgpu_driver_lastclose_kms(struct drm_device *dev)
978 {
979 	drm_fb_helper_lastclose(dev);
980 	vga_switcheroo_process_delayed_switch();
981 }
982 
983 /**
984  * amdgpu_driver_open_kms - drm callback for open
985  *
986  * @dev: drm dev pointer
987  * @file_priv: drm file
988  *
989  * On device open, initialize the per-open driver state, including the GPU VM (all asics).
990  * Returns 0 on success, error on failure.
991  */
992 int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
993 {
994 	struct amdgpu_device *adev = dev->dev_private;
995 	struct amdgpu_fpriv *fpriv;
996 	int r, pasid;
997 
998 	/* Ensure IB tests are run on ring */
999 	flush_delayed_work(&adev->delayed_init_work);
1000 
1001 	file_priv->driver_priv = NULL;
1002 
1003 	r = pm_runtime_get_sync(dev->dev);
1004 	if (r < 0)
1005 		return r;
1006 
1007 	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
1008 	if (unlikely(!fpriv)) {
1009 		r = -ENOMEM;
1010 		goto out_suspend;
1011 	}
1012 
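	/* Allocate a 16-bit PASID for this file's VM; fall back to 0
	 * (no PASID) if none are available.
	 */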
1013 	pasid = amdgpu_pasid_alloc(16);
1014 	if (pasid < 0) {
1015 		dev_warn(adev->dev, "No more PASIDs available!\n");
1016 		pasid = 0;
1017 	}
1018 	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
1019 	if (r)
1020 		goto error_pasid;
1021 
1022 	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
1023 	if (!fpriv->prt_va) {
1024 		r = -ENOMEM;
1025 		goto error_vm;
1026 	}
1027 
1028 	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
1029 		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
1030 
1031 		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
1032 						&fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
1033 		if (r)
1034 			goto error_vm;
1035 	}
1036 
1037 	mutex_init(&fpriv->bo_list_lock);
1038 	idr_init(&fpriv->bo_list_handles);
1039 
1040 	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
1041 
1042 	file_priv->driver_priv = fpriv;
1043 	goto out_suspend;
1044 
1045 error_vm:
1046 	amdgpu_vm_fini(adev, &fpriv->vm);
1047 
1048 error_pasid:
1049 	if (pasid)
1050 		amdgpu_pasid_free(pasid);
1051 
1052 	kfree(fpriv);
1053 
1054 out_suspend:
1055 	pm_runtime_mark_last_busy(dev->dev);
1056 	pm_runtime_put_autosuspend(dev->dev);
1057 
1058 	return r;
1059 }
1060 
1061 /**
1062  * amdgpu_driver_postclose_kms - drm callback for post close
1063  *
1064  * @dev: drm dev pointer
1065  * @file_priv: drm file
1066  *
1067  * On device post close, tear down the per-open driver state, including the GPU VM (all asics).
1068  */
1069 void amdgpu_driver_postclose_kms(struct drm_device *dev,
1070 				 struct drm_file *file_priv)
1071 {
1072 	struct amdgpu_device *adev = dev->dev_private;
1073 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
1074 	struct amdgpu_bo_list *list;
1075 	struct amdgpu_bo *pd;
1076 	unsigned int pasid;
1077 	int handle;
1078 
1079 	if (!fpriv)
1080 		return;
1081 
1082 	pm_runtime_get_sync(dev->dev);
1083 
1084 	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
1085 		amdgpu_uvd_free_handles(adev, file_priv);
1086 	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
1087 		amdgpu_vce_free_handles(adev, file_priv);
1088 
1089 	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);
1090 
1091 	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
1092 		/* TODO: how to handle reserve failure */
1093 		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
1094 		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
1095 		fpriv->csa_va = NULL;
1096 		amdgpu_bo_unreserve(adev->virt.csa_obj);
1097 	}
1098 
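	/* Hold a reference on the page directory so the PASID can be freed
	 * once all fences on it have signaled.
	 */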
1099 	pasid = fpriv->vm.pasid;
1100 	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
1101 
1102 	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
1103 	amdgpu_vm_fini(adev, &fpriv->vm);
1104 
1105 	if (pasid)
1106 		amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
1107 	amdgpu_bo_unref(&pd);
1108 
1109 	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
1110 		amdgpu_bo_list_put(list);
1111 
1112 	idr_destroy(&fpriv->bo_list_handles);
1113 	mutex_destroy(&fpriv->bo_list_lock);
1114 
1115 	kfree(fpriv);
1116 	file_priv->driver_priv = NULL;
1117 
1118 	pm_runtime_mark_last_busy(dev->dev);
1119 	pm_runtime_put_autosuspend(dev->dev);
1120 }
1121 
1122 /*
1123  * VBlank related functions.
1124  */
1125 /**
1126  * amdgpu_get_vblank_counter_kms - get frame count
1127  *
1128  * @dev: drm dev pointer
1129  * @pipe: crtc to get the frame count from
1130  *
1131  * Gets the frame count on the requested crtc (all asics).
1132  * Returns frame count on success, -EINVAL on failure.
1133  */
1134 u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
1135 {
1136 	struct amdgpu_device *adev = dev->dev_private;
1137 	int vpos, hpos, stat;
1138 	u32 count;
1139 
1140 	if (pipe >= adev->mode_info.num_crtc) {
1141 		DRM_ERROR("Invalid crtc %u\n", pipe);
1142 		return -EINVAL;
1143 	}
1144 
1145 	/* The hw increments its frame counter at start of vsync, not at start
1146 	 * of vblank, as is required by DRM core vblank counter handling.
1147 	 * Cook the hw count here to make it appear to the caller as if it
1148 	 * incremented at start of vblank. We measure distance to start of
1149 	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
1150 	 * and start of vsync, so a vpos >= 0 means we bump the hw frame counter
1151 	 * result by 1 to give the proper appearance to the caller.
1152 	 */
1153 	if (adev->mode_info.crtcs[pipe]) {
1154 		/* Repeat readout if needed to provide stable result if
1155 		 * we cross start of vsync during the queries.
1156 		 */
1157 		do {
1158 			count = amdgpu_display_vblank_get_counter(adev, pipe);
1159 			/* Ask amdgpu_display_get_crtc_scanoutpos to return
1160 			 * vpos as distance to start of vblank, instead of
1161 			 * regular vertical scanout pos.
1162 			 */
1163 			stat = amdgpu_display_get_crtc_scanoutpos(
1164 				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
1165 				&vpos, &hpos, NULL, NULL,
1166 				&adev->mode_info.crtcs[pipe]->base.hwmode);
1167 		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));
1168 
1169 		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
1170 		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
1171 			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
1172 		} else {
1173 			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
1174 				      pipe, vpos);
1175 
1176 			/* Bump counter if we are at >= leading edge of vblank,
1177 			 * but before vsync where vpos would turn negative and
1178 			 * the hw counter really increments.
1179 			 */
1180 			if (vpos >= 0)
1181 				count++;
1182 		}
1183 	} else {
1184 		/* Fallback to use value as is. */
1185 		count = amdgpu_display_vblank_get_counter(adev, pipe);
1186 		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
1187 	}
1188 
1189 	return count;
1190 }
1191 
1192 /**
1193  * amdgpu_enable_vblank_kms - enable vblank interrupt
1194  *
1195  * @dev: drm dev pointer
1196  * @pipe: crtc to enable vblank interrupt for
1197  *
1198  * Enable the interrupt on the requested crtc (all asics).
1199  * Returns 0 on success, -EINVAL on failure.
1200  */
1201 int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
1202 {
1203 	struct amdgpu_device *adev = dev->dev_private;
1204 	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
1205 
1206 	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
1207 }
1208 
1209 /**
1210  * amdgpu_disable_vblank_kms - disable vblank interrupt
1211  *
1212  * @dev: drm dev pointer
1213  * @pipe: crtc to disable vblank interrupt for
1214  *
1215  * Disable the interrupt on the requested crtc (all asics).
1216  */
1217 void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
1218 {
1219 	struct amdgpu_device *adev = dev->dev_private;
1220 	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
1221 
1222 	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
1223 }
1224 
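/*
 * KMS ioctl table. DRM_AUTH|DRM_RENDER_ALLOW entries are available to
 * authenticated and render-node clients; AMDGPU_SCHED is restricted to
 * the DRM master.
 */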
1225 const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
1226 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1227 	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1228 	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1229 	DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
1230 	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1231 	DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1232 	/* KMS */
1233 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1234 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1235 	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1236 	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1237 	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1238 	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1239 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1240 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1241 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1242 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
1243 };
1244 const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
1245 
1246 /*
1247  * Debugfs info
1248  */
1249 #if defined(CONFIG_DEBUG_FS)
1250 
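/*
 * Dump the feature and firmware versions of every firmware the driver
 * knows about (VCE, UVD, GMC, GFX, PSP, SMC, SDMA, VCN, DMCU), plus the
 * VBIOS version, to the amdgpu_firmware_info debugfs file.
 */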
1251 static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
1252 {
1253 	struct drm_info_node *node = (struct drm_info_node *) m->private;
1254 	struct drm_device *dev = node->minor->dev;
1255 	struct amdgpu_device *adev = dev->dev_private;
1256 	struct drm_amdgpu_info_firmware fw_info;
1257 	struct drm_amdgpu_query_fw query_fw;
1258 	struct atom_context *ctx = adev->mode_info.atom_context;
1259 	int ret, i;
1260 
1261 	/* VCE */
1262 	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
1263 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1264 	if (ret)
1265 		return ret;
1266 	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
1267 		   fw_info.feature, fw_info.ver);
1268 
1269 	/* UVD */
1270 	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
1271 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1272 	if (ret)
1273 		return ret;
1274 	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
1275 		   fw_info.feature, fw_info.ver);
1276 
1277 	/* GMC */
1278 	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
1279 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1280 	if (ret)
1281 		return ret;
1282 	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
1283 		   fw_info.feature, fw_info.ver);
1284 
1285 	/* ME */
1286 	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
1287 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1288 	if (ret)
1289 		return ret;
1290 	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
1291 		   fw_info.feature, fw_info.ver);
1292 
1293 	/* PFP */
1294 	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
1295 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1296 	if (ret)
1297 		return ret;
1298 	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
1299 		   fw_info.feature, fw_info.ver);
1300 
1301 	/* CE */
1302 	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
1303 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1304 	if (ret)
1305 		return ret;
1306 	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
1307 		   fw_info.feature, fw_info.ver);
1308 
1309 	/* RLC */
1310 	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
1311 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1312 	if (ret)
1313 		return ret;
1314 	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
1315 		   fw_info.feature, fw_info.ver);
1316 
1317 	/* RLC SAVE RESTORE LIST CNTL */
1318 	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
1319 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1320 	if (ret)
1321 		return ret;
1322 	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
1323 		   fw_info.feature, fw_info.ver);
1324 
1325 	/* RLC SAVE RESTORE LIST GPM MEM */
1326 	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
1327 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1328 	if (ret)
1329 		return ret;
1330 	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
1331 		   fw_info.feature, fw_info.ver);
1332 
1333 	/* RLC SAVE RESTORE LIST SRM MEM */
1334 	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
1335 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1336 	if (ret)
1337 		return ret;
1338 	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
1339 		   fw_info.feature, fw_info.ver);
1340 
1341 	/* MEC */
1342 	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
1343 	query_fw.index = 0;
1344 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1345 	if (ret)
1346 		return ret;
1347 	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
1348 		   fw_info.feature, fw_info.ver);
1349 
1350 	/* MEC2 */
1351 	if (adev->asic_type == CHIP_KAVERI ||
1352 	    (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
1353 		query_fw.index = 1;
1354 		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1355 		if (ret)
1356 			return ret;
1357 		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
1358 			   fw_info.feature, fw_info.ver);
1359 	}
1360 
1361 	/* PSP SOS */
1362 	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
1363 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1364 	if (ret)
1365 		return ret;
1366 	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
1367 		   fw_info.feature, fw_info.ver);
1368 
1370 	/* PSP ASD */
1371 	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
1372 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1373 	if (ret)
1374 		return ret;
1375 	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
1376 		   fw_info.feature, fw_info.ver);
1377 
1378 	query_fw.fw_type = AMDGPU_INFO_FW_TA;
1379 	for (i = 0; i < 2; i++) {
1380 		query_fw.index = i;
1381 		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1382 		if (ret)
1383 			continue;
1384 		seq_printf(m, "TA %s feature version: %u, firmware version: 0x%08x\n",
1385 				i ? "RAS" : "XGMI", fw_info.feature, fw_info.ver);
1386 	}
1387 
1388 	/* SMC */
1389 	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
1390 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1391 	if (ret)
1392 		return ret;
1393 	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
1394 		   fw_info.feature, fw_info.ver);
1395 
1396 	/* SDMA */
1397 	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
1398 	for (i = 0; i < adev->sdma.num_instances; i++) {
1399 		query_fw.index = i;
1400 		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1401 		if (ret)
1402 			return ret;
1403 		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
1404 			   i, fw_info.feature, fw_info.ver);
1405 	}
1406 
1407 	/* VCN */
1408 	query_fw.fw_type = AMDGPU_INFO_FW_VCN;
1409 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1410 	if (ret)
1411 		return ret;
1412 	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
1413 		   fw_info.feature, fw_info.ver);
1414 
1415 	/* DMCU */
1416 	query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
1417 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1418 	if (ret)
1419 		return ret;
1420 	seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
1421 		   fw_info.feature, fw_info.ver);
1422 
1424 	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);
1425 
1426 	return 0;
1427 }
1428 
1429 static const struct drm_info_list amdgpu_firmware_info_list[] = {
1430 	{"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
1431 };
1432 #endif
1433 
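/*
 * Register the amdgpu_firmware_info debugfs file. Returns 0 without doing
 * anything when CONFIG_DEBUG_FS is not enabled.
 */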
1434 int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
1435 {
1436 #if defined(CONFIG_DEBUG_FS)
1437 	return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
1438 					ARRAY_SIZE(amdgpu_firmware_info_list));
1439 #else
1440 	return 0;
1441 #endif
1442 }
1443