/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
#include "amdgpu_ras.h"

static void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
	int i;

	mutex_lock(&mgpu_info.mutex);

	for (i = 0; i < mgpu_info.num_gpu; i++) {
		gpu_instance = &(mgpu_info.gpu_ins[i]);
		if (gpu_instance->adev == adev) {
			mgpu_info.gpu_ins[i] =
				mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
			mgpu_info.num_gpu--;
			if (adev->flags & AMD_IS_APU)
				mgpu_info.num_apu--;
			else
				mgpu_info.num_dgpu--;
			break;
		}
	}

	mutex_unlock(&mgpu_info.mutex);
}
/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL)
		return;

	amdgpu_unregister_gpu_instance(adev);

	if (adev->rmmio == NULL)
		goto done_free;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	if (amdgpu_device_is_px(dev)) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	amdgpu_acpi_fini(adev);

	amdgpu_device_fini(adev);

done_free:
	kfree(adev);
	dev->dev_private = NULL;
}

static void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;

	mutex_lock(&mgpu_info.mutex);

	if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
		DRM_ERROR("Cannot register more GPU instances\n");
		mutex_unlock(&mgpu_info.mutex);
		return;
	}

	gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
	gpu_instance->adev = adev;
	gpu_instance->mgpu_fan_enabled = 0;

	mgpu_info.num_gpu++;
	if (adev->flags & AMD_IS_APU)
		mgpu_info.num_apu++;
	else
		mgpu_info.num_dgpu++;

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r, acpi_status;

#ifdef CONFIG_DRM_AMDGPU_SI
	if (!amdgpu_si_support) {
		switch (flags & AMD_ASIC_MASK) {
		case CHIP_TAHITI:
		case CHIP_PITCAIRN:
		case CHIP_VERDE:
		case CHIP_OLAND:
		case CHIP_HAINAN:
			dev_info(dev->dev,
				 "SI support provided by radeon.\n");
			dev_info(dev->dev,
				 "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
				);
			return -ENODEV;
		}
	}
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	if (!amdgpu_cik_support) {
		switch (flags & AMD_ASIC_MASK) {
		case CHIP_KAVERI:
		case CHIP_BONAIRE:
		case CHIP_HAWAII:
		case CHIP_KABINI:
		case CHIP_MULLINS:
			dev_info(dev->dev,
				 "CIK support provided by radeon.\n");
			dev_info(dev->dev,
				 "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
				);
			return -ENODEV;
		}
	}
#endif

	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
	if (adev == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)adev;

	if ((amdgpu_runtime_pm != 0) &&
	    amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((flags & AMD_IS_APU) == 0) &&
	    !pci_is_thunderbolt_attached(dev->pdev))
		flags |= AMD_IS_PX;

	/* amdgpu_device_init() should report only fatal errors
	 * (memory allocation, iomapping, or memory manager
	 * initialization failures).  It must properly initialize
	 * the GPU MC controller and permit VRAM allocation.
	 */
	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Call ACPI methods: they require modeset init
	 * but failure is not fatal.
	 */
	if (!r) {
		acpi_status = amdgpu_acpi_init(adev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

	if (amdgpu_device_is_px(dev)) {
		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

	amdgpu_register_gpu_instance(adev);
out:
	if (r) {
		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
		if (adev->rmmio && amdgpu_device_is_px(dev))
			pm_runtime_put_noidle(dev->dev);
		amdgpu_driver_unload_kms(dev);
	}

	return r;
}

static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
				struct drm_amdgpu_query_fw *query_fw,
				struct amdgpu_device *adev)
{
	switch (query_fw->fw_type) {
	case AMDGPU_INFO_FW_VCE:
		fw_info->ver = adev->vce.fw_version;
		fw_info->feature = adev->vce.fb_version;
		break;
	case AMDGPU_INFO_FW_UVD:
		fw_info->ver = adev->uvd.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_VCN:
		fw_info->ver = adev->vcn.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GMC:
		fw_info->ver = adev->gmc.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GFX_ME:
		fw_info->ver = adev->gfx.me_fw_version;
		fw_info->feature = adev->gfx.me_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_PFP:
		fw_info->ver = adev->gfx.pfp_fw_version;
		fw_info->feature = adev->gfx.pfp_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_CE:
		fw_info->ver = adev->gfx.ce_fw_version;
		fw_info->feature = adev->gfx.ce_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC:
		fw_info->ver = adev->gfx.rlc_fw_version;
		fw_info->feature = adev->gfx.rlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
		fw_info->ver = adev->gfx.rlc_srls_fw_version;
		fw_info->feature = adev->gfx.rlc_srls_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_MEC:
		if (query_fw->index == 0) {
			fw_info->ver = adev->gfx.mec_fw_version;
			fw_info->feature = adev->gfx.mec_feature_version;
		} else if (query_fw->index == 1) {
			fw_info->ver = adev->gfx.mec2_fw_version;
			fw_info->feature = adev->gfx.mec2_feature_version;
		} else {
			return -EINVAL;
		}
		break;
	case AMDGPU_INFO_FW_SMC:
		fw_info->ver = adev->pm.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_TA:
		if (query_fw->index > 1)
			return -EINVAL;
		if (query_fw->index == 0) {
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_xgmi_ucode_version;
		} else {
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_ras_ucode_version;
		}
		break;
	case AMDGPU_INFO_FW_SDMA:
		if (query_fw->index >= adev->sdma.num_instances)
			return -EINVAL;
		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
		break;
	case AMDGPU_INFO_FW_SOS:
		fw_info->ver = adev->psp.sos_fw_version;
		fw_info->feature = adev->psp.sos_feature_version;
		break;
	case AMDGPU_INFO_FW_ASD:
		fw_info->ver = adev->psp.asd_fw_version;
		fw_info->feature = adev->psp.asd_feature_version;
		break;
	case AMDGPU_INFO_FW_DMCU:
		fw_info->ver = adev->dm.dmcu_fw_version;
		fw_info->feature = 0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
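
/*
 * Illustrative sketch, not driver code: userspace reaches the helper
 * above through the AMDGPU_INFO ioctl with an AMDGPU_INFO_FW_VERSION
 * query.  Assuming an open device fd and libdrm, roughly:
 *
 *	struct drm_amdgpu_info request = {};
 *	struct drm_amdgpu_info_firmware fw = {};
 *
 *	request.return_pointer = (uintptr_t)&fw;
 *	request.return_size = sizeof(fw);
 *	request.query = AMDGPU_INFO_FW_VERSION;
 *	request.query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
 *	if (drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request)) == 0)
 *		printf("ME fw 0x%08x, feature %u\n", fw.ver, fw.feature);
 */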

static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
			     struct drm_amdgpu_info *info,
			     struct drm_amdgpu_info_hw_ip *result)
{
	uint32_t ib_start_alignment = 0;
	uint32_t ib_size_alignment = 0;
	enum amd_ip_block_type type;
	unsigned int num_rings = 0;
	unsigned int i, j;

	if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
		return -EINVAL;

	switch (info->query_hw_ip.type) {
	case AMDGPU_HW_IP_GFX:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			if (adev->gfx.gfx_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_COMPUTE:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			if (adev->gfx.compute_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_DMA:
		type = AMD_IP_BLOCK_TYPE_SDMA;
		for (i = 0; i < adev->sdma.num_instances; i++)
			if (adev->sdma.instance[i].ring.sched.ready)
				++num_rings;
		ib_start_alignment = 256;
		ib_size_alignment = 4;
		break;
	case AMDGPU_HW_IP_UVD:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			if (adev->uvd.inst[i].ring.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCE:
		type = AMD_IP_BLOCK_TYPE_VCE;
		for (i = 0; i < adev->vce.num_rings; i++)
			if (adev->vce.ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 4;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->uvd.num_enc_rings; j++)
				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		if (adev->vcn.ring_dec.sched.ready)
			++num_rings;
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_enc_rings; i++)
			if (adev->vcn.ring_enc[i].sched.ready)
				++num_rings;
		ib_start_alignment = 64;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		type = AMD_IP_BLOCK_TYPE_VCN;
		if (adev->vcn.ring_jpeg.sched.ready)
			++num_rings;
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type &&
		    adev->ip_blocks[i].status.valid)
			break;

	if (i == adev->num_ip_blocks)
		return 0;

	num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
			num_rings);

	result->hw_ip_version_major = adev->ip_blocks[i].version->major;
	result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;
	result->capabilities_flags = 0;
	result->available_rings = (1 << num_rings) - 1;
	result->ib_start_alignment = ib_start_alignment;
	result->ib_size_alignment = ib_size_alignment;
	return 0;
}
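
/*
 * Illustrative sketch, not driver code: available_rings above is a
 * bitmask with one bit per usable ring of the queried IP type, so a
 * userspace consumer of AMDGPU_INFO_HW_IP_INFO (names here assumed)
 * can test whether ring n is usable with:
 *
 *	struct drm_amdgpu_info_hw_ip ip_info;
 *	bool ring_usable;
 *
 *	// ... ioctl query fills ip_info ...
 *	ring_usable = ip_info.available_rings & (1u << n);
 */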

/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm dev pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers.  Examples include: pci device id, pipeline params, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, found;
	int ui32_size = sizeof(ui32);

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		int ret;

		ret = amdgpu_hw_ip_info(adev, info, &ip);
		if (ret)
			return ret;

		ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
		return ret ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amd_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
		case AMDGPU_HW_IP_VCN_ENC:
		case AMDGPU_HW_IP_VCN_JPEG:
			type = AMD_IP_BLOCK_TYPE_VCN;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;
		int ret;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
		if (ret)
			return ret;

		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_EVICTIONS:
		ui64 = atomic64_read(&adev->num_evictions);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size;
		gds_info.compute_partition_size = adev->gds.mem.cs_partition_size;
		gds_info.gds_total_size = adev->gds.mem.total_size;
		gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size;
		gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size;
		gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size;
		gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size);
		vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
			atomic64_read(&adev->visible_pin_size);
		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
		vram_gtt.gtt_size *= PAGE_SIZE;
		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_MEMORY: {
		struct drm_amdgpu_memory_info mem;

		memset(&mem, 0, sizeof(mem));
		mem.vram.total_heap_size = adev->gmc.real_vram_size;
		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size);
		mem.vram.heap_usage =
			amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

		mem.cpu_accessible_vram.total_heap_size =
			adev->gmc.visible_vram_size;
		mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
			atomic64_read(&adev->visible_pin_size);
		mem.cpu_accessible_vram.heap_usage =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.cpu_accessible_vram.max_allocation =
			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

		mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
		mem.gtt.total_heap_size *= PAGE_SIZE;
		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
			atomic64_read(&adev->gart_pin_size);
		mem.gtt.heap_usage =
			amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

		return copy_to_user(out, &mem,
				    min((size_t)size, sizeof(mem)))
				    ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size;
		uint32_t *regs;
		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned sh_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SH_INDEX_MASK;

		/* set full masks if userspace set all bits
		 * in the bitfields */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
			se_num = 0xffffffff;
		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
			sh_num = 0xffffffff;

		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
		if (!regs)
			return -ENOMEM;
		alloc_size = info->read_mmr_reg.count * sizeof(*regs);

		for (i = 0; i < info->read_mmr_reg.count; i++)
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("disallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				return -EFAULT;
			}
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		return n ? -EFAULT : 0;
	}
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device dev_info = {};
		uint64_t vm_size;

		dev_info.device_id = dev->pdev->device;
		dev_info.chip_rev = adev->rev_id;
		dev_info.external_rev = adev->external_rev_id;
		dev_info.pci_rev = dev->pdev->revision;
		dev_info.family = adev->family;
		dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in KHz */
		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled) {
			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
		} else {
			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
			dev_info.max_memory_clock = adev->clock.default_mclk * 10;
		}
		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
		dev_info._pad = 0;
		dev_info.ids_flags = 0;
		if (adev->flags & AMD_IS_APU)
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		if (amdgpu_sriov_vf(adev))
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;

		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		vm_size -= AMDGPU_VA_RESERVED_SIZE;

		/* Older VCE FW versions are buggy and can handle only 40 bits */
		if (adev->vce.fw_version &&
		    adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
			vm_size = min(vm_size, 1ULL << 40);

		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info.virtual_address_max =
			min(vm_size, AMDGPU_GMC_HOLE_START);

		if (vm_size > AMDGPU_GMC_HOLE_START) {
			dev_info.high_va_offset = AMDGPU_GMC_HOLE_END;
			dev_info.high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
		}
		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
		dev_info.cu_active_number = adev->gfx.cu_info.number;
		dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
		memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
		       sizeof(adev->gfx.cu_info.bitmap));
		dev_info.vram_type = adev->gmc.vram_type;
		dev_info.vram_bit_width = adev->gmc.vram_width;
		dev_info.vce_harvest_config = adev->vce.harvest_config;
		dev_info.gc_double_offchip_lds_buf =
			adev->gfx.config.double_offchip_lds_buf;

		if (amdgpu_ngg) {
			dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
			dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
			dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
			dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
			dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
			dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
			dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
			dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
		}
		dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
		dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
		dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
		dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
		dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
		dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
		dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

		return copy_to_user(out, &dev_info,
				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
		unsigned i;
		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
		struct amd_vce_state *vce_state;

		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
			if (vce_state) {
				vce_clk_table.entries[i].sclk = vce_state->sclk;
				vce_clk_table.entries[i].mclk = vce_state->mclk;
				vce_clk_table.entries[i].eclk = vce_state->evclk;
				vce_clk_table.num_valid_entries++;
			}
		}

		return copy_to_user(out, &vce_clk_table,
				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VBIOS: {
		uint32_t bios_size = adev->bios_size;

		switch (info->vbios_info.type) {
		case AMDGPU_INFO_VBIOS_SIZE:
			return copy_to_user(out, &bios_size,
					min((size_t)size, sizeof(bios_size)))
					? -EFAULT : 0;
		case AMDGPU_INFO_VBIOS_IMAGE: {
			uint8_t *bios;
			uint32_t bios_offset = info->vbios_info.offset;

			if (bios_offset >= bios_size)
				return -EINVAL;

			bios = adev->bios + bios_offset;
			return copy_to_user(out, bios,
					    min((size_t)size, (size_t)(bios_size - bios_offset)))
					? -EFAULT : 0;
		}
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
					info->vbios_info.type);
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_NUM_HANDLES: {
		struct drm_amdgpu_info_num_handles handle;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_UVD:
			/* Starting with Polaris, we support unlimited UVD handles */
			if (adev->asic_type < CHIP_POLARIS10) {
				handle.uvd_max_handles = adev->uvd.max_handles;
				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

				return copy_to_user(out, &handle,
					min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
			} else {
				return -ENODATA;
			}

			break;
		default:
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_SENSOR: {
		if (!adev->pm.dpm_enabled)
			return -ENOENT;

		switch (info->sensor_info.type) {
		case AMDGPU_INFO_SENSOR_GFX_SCLK:
			/* get sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GFX_MCLK:
			/* get mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GPU_TEMP:
			/* get temperature in millidegrees C */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_TEMP,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_LOAD:
			/* get GPU load */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_LOAD,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
			/* get average GPU power */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_POWER,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 >>= 8;
			break;
		case AMDGPU_INFO_SENSOR_VDDNB:
			/* get VDDNB in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDNB,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_VDDGFX:
			/* get VDDGFX in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDGFX,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
			/* get stable pstate sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
			/* get stable pstate mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->sensor_info.type);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_LOST_COUNTER:
		ui32 = atomic_read(&adev->vram_lost_counter);
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
		uint64_t ras_mask;

		if (!ras)
			return -EINVAL;
		ras_mask = (uint64_t)ras->supported << 32 | ras->features;

		return copy_to_user(out, &ras_mask,
				min_t(u64, size, sizeof(ras_mask))) ?
			-EFAULT : 0;
	}
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}
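
/*
 * Illustrative sketch, not driver code: the simple scalar queries above
 * return a raw integer through return_pointer.  Assuming an open
 * render-node fd and libdrm, current VRAM usage could be read as:
 *
 *	struct drm_amdgpu_info request = {};
 *	uint64_t vram_usage = 0;
 *
 *	request.return_pointer = (uintptr_t)&vram_usage;
 *	request.return_size = sizeof(vram_usage);
 *	request.query = AMDGPU_INFO_VRAM_USAGE;
 *	if (drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request)) == 0)
 *		printf("VRAM usage: %llu bytes\n",
 *		       (unsigned long long)vram_usage);
 */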

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv;
	int r, pasid;

	/* Ensure IB tests are run on ring */
	flush_delayed_work(&adev->late_init_work);

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!\n");
		pasid = 0;
	}
	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
	if (r)
		goto error_pasid;

	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
	if (!fpriv->prt_va) {
		r = -ENOMEM;
		goto error_vm;
	}

	if (amdgpu_sriov_vf(adev)) {
		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;

		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
						&fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
		if (r)
			goto error_vm;
	}

	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);

	file_priv->driver_priv = fpriv;
	goto out_suspend;

error_vm:
	amdgpu_vm_fini(adev, &fpriv->vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	kfree(fpriv);

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}

/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	unsigned int pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
		amdgpu_uvd_free_handles(adev, file_priv);
	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
		amdgpu_vce_free_handles(adev, file_priv);

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
	amdgpu_vm_fini(adev, &fpriv->vm);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_put(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @pipe: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int vpos, hpos, stat;
	u32 count;

	if (pipe >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (adev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = amdgpu_display_vblank_get_counter(adev, pipe);
			/* Ask amdgpu_display_get_crtc_scanoutpos to return
			 * vpos as distance to start of vblank, instead of
			 * regular vertical scanout pos.
			 */
			stat = amdgpu_display_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&adev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

		if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}

/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}

const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	/* KMS */
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw;
	struct atom_context *ctx = adev->mode_info.atom_context;
	int ret, i;

	/* VCE */
	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* UVD */
	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* GMC */
	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* ME */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PFP */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CE */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST CNTL */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST GPM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST SRM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC2 */
	if (adev->asic_type == CHIP_KAVERI ||
	    (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
		query_fw.index = 1;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	/* PSP SOS */
	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP ASD */
	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_TA;
	for (i = 0; i < 2; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			continue;
		seq_printf(m, "TA %s feature version: %u, firmware version: 0x%08x\n",
				i ? "RAS" : "XGMI", fw_info.feature, fw_info.ver);
	}

	/* SMC */
	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SDMA */
	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
			   i, fw_info.feature, fw_info.ver);
	}

	/* VCN */
	query_fw.fw_type = AMDGPU_INFO_FW_VCN;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCU */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);

	return 0;
}

static const struct drm_info_list amdgpu_firmware_info_list[] = {
	{"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
};
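
/*
 * Illustrative note, not driver code: with debugfs mounted, the entry
 * registered above exposes the same firmware versions as the
 * AMDGPU_INFO_FW_VERSION ioctl query and can be read directly, e.g.
 * (DRM minor 0 assumed):
 *
 *	cat /sys/kernel/debug/dri/0/amdgpu_firmware_info
 */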
#endif

int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
					ARRAY_SIZE(amdgpu_firmware_info_list));
#else
	return 0;
#endif
}