/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
#include "amdgpu_ras.h"
#include "amdgpu_reset.h"
#include "amd_pcie.h"

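/**
 * amdgpu_unregister_gpu_instance - remove a GPU instance from the mgpu list
 *
 * @adev: amdgpu device pointer
 *
 * Removes @adev from the global mgpu_info table and updates the
 * APU/dGPU counts accordingly.
 */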
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
	int i;

	mutex_lock(&mgpu_info.mutex);

	for (i = 0; i < mgpu_info.num_gpu; i++) {
		gpu_instance = &(mgpu_info.gpu_ins[i]);
		if (gpu_instance->adev == adev) {
			mgpu_info.gpu_ins[i] =
				mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
			mgpu_info.num_gpu--;
			if (adev->flags & AMD_IS_APU)
				mgpu_info.num_apu--;
			else
				mgpu_info.num_dgpu--;
			break;
		}
	}

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (adev == NULL)
		return;

	amdgpu_unregister_gpu_instance(adev);

	if (adev->rmmio == NULL)
		return;

	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD))
		DRM_WARN("smart shift update failed\n");

	amdgpu_acpi_fini(adev);
	amdgpu_device_fini_hw(adev);
}

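/**
 * amdgpu_register_gpu_instance - add a GPU instance to the mgpu list
 *
 * @adev: amdgpu device pointer
 *
 * Adds @adev to the global mgpu_info table (up to MAX_GPU_INSTANCE
 * entries) and updates the APU/dGPU counts accordingly.
 */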
void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;

	mutex_lock(&mgpu_info.mutex);

	if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
		DRM_ERROR("Cannot register more gpu instances\n");
		mutex_unlock(&mgpu_info.mutex);
		return;
	}

	gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
	gpu_instance->adev = adev;
	gpu_instance->mgpu_fan_enabled = 0;

	mgpu_info.num_gpu++;
	if (adev->flags & AMD_IS_APU)
		mgpu_info.num_apu++;
	else
		mgpu_info.num_dgpu++;

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @adev: pointer to struct amdgpu_device
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
{
	struct drm_device *dev;
	int r, acpi_status;

	dev = adev_to_drm(adev);

	/* amdgpu_device_init() should report only fatal errors, such as
	 * memory allocation, iomapping, or memory manager initialization
	 * failures; it must properly initialize the GPU MC controller
	 * and permit VRAM allocation.
	 */
	r = amdgpu_device_init(adev, flags);
	if (r) {
		dev_err(dev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
	if (amdgpu_device_supports_px(dev) &&
	    (amdgpu_runtime_pm != 0)) { /* enable PX as runtime mode */
		adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
		dev_info(adev->dev, "Using ATPX for runtime pm\n");
	} else if (amdgpu_device_supports_boco(dev) &&
		   (amdgpu_runtime_pm != 0)) { /* enable boco as runtime mode */
		adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
		dev_info(adev->dev, "Using BOCO for runtime pm\n");
	} else if (amdgpu_device_supports_baco(dev) &&
		   (amdgpu_runtime_pm != 0)) {
		switch (adev->asic_type) {
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
			/* enable BACO as runpm mode if runpm=1 */
			if (amdgpu_runtime_pm > 0)
				adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
			break;
		case CHIP_VEGA10:
			/* enable BACO as runpm mode if noretry=0 */
			if (!adev->gmc.noretry)
				adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
			break;
		default:
			/* enable BACO as runpm mode on CI+ */
			adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
			break;
		}

		if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO)
			dev_info(adev->dev, "Using BACO for runtime pm\n");
	}

	/* Call ACPI methods: they require modeset init,
	 * but failure is not fatal.
	 */

	acpi_status = amdgpu_acpi_init(adev);
	if (acpi_status)
		dev_dbg(dev->dev, "Error during ACPI methods call\n");

	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD))
		DRM_WARN("smart shift update failed\n");

out:
	if (r)
		amdgpu_driver_unload_kms(dev);

	return r;
}

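/**
 * amdgpu_firmware_info - fill out firmware version information
 *
 * @fw_info: firmware and feature version info to fill out
 * @query_fw: which firmware to query (type and, where relevant, index)
 * @adev: amdgpu device pointer
 *
 * Looks up the firmware and feature version for the requested firmware
 * type. Returns 0 on success, -EINVAL on an unknown type or
 * out-of-range index.
 */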
static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
				struct drm_amdgpu_query_fw *query_fw,
				struct amdgpu_device *adev)
{
	switch (query_fw->fw_type) {
	case AMDGPU_INFO_FW_VCE:
		fw_info->ver = adev->vce.fw_version;
		fw_info->feature = adev->vce.fb_version;
		break;
	case AMDGPU_INFO_FW_UVD:
		fw_info->ver = adev->uvd.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_VCN:
		fw_info->ver = adev->vcn.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GMC:
		fw_info->ver = adev->gmc.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GFX_ME:
		fw_info->ver = adev->gfx.me_fw_version;
		fw_info->feature = adev->gfx.me_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_PFP:
		fw_info->ver = adev->gfx.pfp_fw_version;
		fw_info->feature = adev->gfx.pfp_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_CE:
		fw_info->ver = adev->gfx.ce_fw_version;
		fw_info->feature = adev->gfx.ce_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC:
		fw_info->ver = adev->gfx.rlc_fw_version;
		fw_info->feature = adev->gfx.rlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
		fw_info->ver = adev->gfx.rlc_srls_fw_version;
		fw_info->feature = adev->gfx.rlc_srls_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLCP:
		fw_info->ver = adev->gfx.rlcp_ucode_version;
		fw_info->feature = adev->gfx.rlcp_ucode_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLCV:
		fw_info->ver = adev->gfx.rlcv_ucode_version;
		fw_info->feature = adev->gfx.rlcv_ucode_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_MEC:
		if (query_fw->index == 0) {
			fw_info->ver = adev->gfx.mec_fw_version;
			fw_info->feature = adev->gfx.mec_feature_version;
		} else if (query_fw->index == 1) {
			fw_info->ver = adev->gfx.mec2_fw_version;
			fw_info->feature = adev->gfx.mec2_feature_version;
		} else
			return -EINVAL;
		break;
	case AMDGPU_INFO_FW_SMC:
		fw_info->ver = adev->pm.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_TA:
		switch (query_fw->index) {
		case TA_FW_TYPE_PSP_XGMI:
			fw_info->ver = adev->psp.xgmi_context.context.bin_desc.fw_version;
			fw_info->feature = adev->psp.xgmi_context.context
						   .bin_desc.feature_version;
			break;
		case TA_FW_TYPE_PSP_RAS:
			fw_info->ver = adev->psp.ras_context.context.bin_desc.fw_version;
			fw_info->feature = adev->psp.ras_context.context
						   .bin_desc.feature_version;
			break;
		case TA_FW_TYPE_PSP_HDCP:
			fw_info->ver = adev->psp.hdcp_context.context.bin_desc.fw_version;
			fw_info->feature = adev->psp.hdcp_context.context
						   .bin_desc.feature_version;
			break;
		case TA_FW_TYPE_PSP_DTM:
			fw_info->ver = adev->psp.dtm_context.context.bin_desc.fw_version;
			fw_info->feature = adev->psp.dtm_context.context
						   .bin_desc.feature_version;
			break;
		case TA_FW_TYPE_PSP_RAP:
			fw_info->ver = adev->psp.rap_context.context.bin_desc.fw_version;
			fw_info->feature = adev->psp.rap_context.context
						   .bin_desc.feature_version;
			break;
		case TA_FW_TYPE_PSP_SECUREDISPLAY:
			fw_info->ver = adev->psp.securedisplay_context.context.bin_desc.fw_version;
			fw_info->feature =
				adev->psp.securedisplay_context.context.bin_desc
					.feature_version;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AMDGPU_INFO_FW_SDMA:
		if (query_fw->index >= adev->sdma.num_instances)
			return -EINVAL;
		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
		break;
	case AMDGPU_INFO_FW_SOS:
		fw_info->ver = adev->psp.sos.fw_version;
		fw_info->feature = adev->psp.sos.feature_version;
		break;
	case AMDGPU_INFO_FW_ASD:
		fw_info->ver = adev->psp.asd_context.bin_desc.fw_version;
		fw_info->feature = adev->psp.asd_context.bin_desc.feature_version;
		break;
	case AMDGPU_INFO_FW_DMCU:
		fw_info->ver = adev->dm.dmcu_fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_DMCUB:
		fw_info->ver = adev->dm.dmcub_fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_TOC:
		fw_info->ver = adev->psp.toc.fw_version;
		fw_info->feature = adev->psp.toc.feature_version;
		break;
	case AMDGPU_INFO_FW_CAP:
		fw_info->ver = adev->psp.cap_fw_version;
		fw_info->feature = adev->psp.cap_feature_version;
		break;
	case AMDGPU_INFO_FW_MES_KIQ:
		fw_info->ver = adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK;
		fw_info->feature = (adev->mes.kiq_version & AMDGPU_MES_FEAT_VERSION_MASK)
					>> AMDGPU_MES_FEAT_VERSION_SHIFT;
		break;
	case AMDGPU_INFO_FW_MES:
		fw_info->ver = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
		fw_info->feature = (adev->mes.sched_version & AMDGPU_MES_FEAT_VERSION_MASK)
					>> AMDGPU_MES_FEAT_VERSION_SHIFT;
		break;
	case AMDGPU_INFO_FW_IMU:
		fw_info->ver = adev->gfx.imu_fw_version;
		fw_info->feature = 0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

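/**
 * amdgpu_hw_ip_info - fill out hardware IP block information
 *
 * @adev: amdgpu device pointer
 * @info: userspace query (IP type and instance)
 * @result: hardware IP information to fill out
 *
 * Counts the ready rings of the requested IP type and reports the IB
 * alignment requirements and IP version. @result->available_rings is a
 * bitmask with the low num_rings bits set (e.g. 3 rings -> 0x7).
 * Returns 0 on success (without touching @result if the IP block is
 * not present), -EINVAL on an unknown type or instance.
 */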
static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
			     struct drm_amdgpu_info *info,
			     struct drm_amdgpu_info_hw_ip *result)
{
	uint32_t ib_start_alignment = 0;
	uint32_t ib_size_alignment = 0;
	enum amd_ip_block_type type;
	unsigned int num_rings = 0;
	unsigned int i, j;

	if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
		return -EINVAL;

	switch (info->query_hw_ip.type) {
	case AMDGPU_HW_IP_GFX:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			if (adev->gfx.gfx_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_COMPUTE:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			if (adev->gfx.compute_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_DMA:
		type = AMD_IP_BLOCK_TYPE_SDMA;
		for (i = 0; i < adev->sdma.num_instances; i++)
			if (adev->sdma.instance[i].ring.sched.ready)
				++num_rings;
		ib_start_alignment = 256;
		ib_size_alignment = 4;
		break;
	case AMDGPU_HW_IP_UVD:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			if (adev->uvd.inst[i].ring.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCE:
		type = AMD_IP_BLOCK_TYPE_VCE;
		for (i = 0; i < adev->vce.num_rings; i++)
			if (adev->vce.ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 4;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->uvd.num_enc_rings; j++)
				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			if (adev->vcn.inst[i].ring_dec.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->vcn.num_enc_rings; j++)
				if (adev->vcn.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
			AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;

		for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
			if (adev->jpeg.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->jpeg.num_jpeg_rings; j++)
				if (adev->jpeg.inst[i].ring_dec[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type &&
		    adev->ip_blocks[i].status.valid)
			break;

	if (i == adev->num_ip_blocks)
		return 0;

	num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
			num_rings);

	result->hw_ip_version_major = adev->ip_blocks[i].version->major;
	result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;

	if (adev->asic_type >= CHIP_VEGA10) {
		switch (type) {
		case AMD_IP_BLOCK_TYPE_GFX:
			result->ip_discovery_version = adev->ip_versions[GC_HWIP][0];
			break;
		case AMD_IP_BLOCK_TYPE_SDMA:
			result->ip_discovery_version = adev->ip_versions[SDMA0_HWIP][0];
			break;
		case AMD_IP_BLOCK_TYPE_UVD:
		case AMD_IP_BLOCK_TYPE_VCN:
		case AMD_IP_BLOCK_TYPE_JPEG:
			result->ip_discovery_version = adev->ip_versions[UVD_HWIP][0];
			break;
		case AMD_IP_BLOCK_TYPE_VCE:
			result->ip_discovery_version = adev->ip_versions[VCE_HWIP][0];
			break;
		default:
			result->ip_discovery_version = 0;
			break;
		}
	} else {
		result->ip_discovery_version = 0;
	}
	result->capabilities_flags = 0;
	result->available_rings = (1 << num_rings) - 1;
	result->ib_start_alignment = ib_start_alignment;
	result->ib_size_alignment = ib_size_alignment;
	return 0;
}

/*
 * Userspace get information ioctl
 */
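/*
 * Illustrative userspace sketch (not part of the kernel build): querying
 * VRAM usage through this ioctl via libdrm's drmCommandWrite(). Assumes
 * `fd` is an open render node; error handling is elided.
 *
 *	#include <stdint.h>
 *	#include <xf86drm.h>
 *	#include <amdgpu_drm.h>
 *
 *	uint64_t vram_usage = 0;
 *	struct drm_amdgpu_info request = {
 *		.return_pointer = (uintptr_t)&vram_usage,
 *		.return_size = sizeof(vram_usage),
 *		.query = AMDGPU_INFO_VRAM_USAGE,
 *	};
 *	int r = drmCommandWrite(fd, DRM_AMDGPU_INFO,
 *				&request, sizeof(request));
 */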
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline params, tiling params,
 * etc. (all asics).
 * Returns 0 on success, negative error code on failure.
 */
int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, found;
	int ui32_size = sizeof(ui32);

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		int ret;

		ret = amdgpu_hw_ip_info(adev, info, &ip);
		if (ret)
			return ret;

		ret = copy_to_user(out, &ip, min_t(size_t, size, sizeof(ip)));
		return ret ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amd_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
		case AMDGPU_HW_IP_VCN_ENC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
				AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;
		int ret;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
		if (ret)
			return ret;

		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_EVICTIONS:
		ui64 = atomic64_read(&adev->num_evictions);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = ttm_resource_manager_usage(&adev->mman.gtt_mgr.manager);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.compute_partition_size = adev->gds.gds_size;
		gds_info.gds_total_size = adev->gds.gds_size;
		gds_info.gws_per_compute_partition = adev->gds.gws_size;
		gds_info.oa_per_compute_partition = adev->gds.oa_size;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		vram_gtt.vram_cpu_accessible_size =
			min(adev->gmc.visible_vram_size -
			    atomic64_read(&adev->visible_pin_size),
			    vram_gtt.vram_size);
		vram_gtt.gtt_size = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)->size;
		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_MEMORY: {
		struct drm_amdgpu_memory_info mem;
		struct ttm_resource_manager *gtt_man =
			&adev->mman.gtt_mgr.manager;
		struct ttm_resource_manager *vram_man =
			&adev->mman.vram_mgr.manager;

		memset(&mem, 0, sizeof(mem));
		mem.vram.total_heap_size = adev->gmc.real_vram_size;
		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		mem.vram.heap_usage =
			ttm_resource_manager_usage(vram_man);
		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

		mem.cpu_accessible_vram.total_heap_size =
			adev->gmc.visible_vram_size;
		mem.cpu_accessible_vram.usable_heap_size =
			min(adev->gmc.visible_vram_size -
			    atomic64_read(&adev->visible_pin_size),
			    mem.vram.usable_heap_size);
		mem.cpu_accessible_vram.heap_usage =
			amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
		mem.cpu_accessible_vram.max_allocation =
			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

		mem.gtt.total_heap_size = gtt_man->size;
		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
			atomic64_read(&adev->gart_pin_size);
		mem.gtt.heap_usage = ttm_resource_manager_usage(gtt_man);
		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

		return copy_to_user(out, &mem,
				    min((size_t)size, sizeof(mem)))
				    ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		int ret = 0;
		unsigned int n, alloc_size;
		uint32_t *regs;
		unsigned int se_num = (info->read_mmr_reg.instance >>
				       AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				      AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned int sh_num = (info->read_mmr_reg.instance >>
				       AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				      AMDGPU_INFO_MMR_SH_INDEX_MASK;

		if (!down_read_trylock(&adev->reset_domain->sem))
			return -ENOENT;

		/* Set full masks if userspace set all bits
		 * in the bitfields.
		 */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) {
			se_num = 0xffffffff;
		} else if (se_num >= AMDGPU_GFX_MAX_SE) {
			ret = -EINVAL;
			goto out;
		}

		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) {
			sh_num = 0xffffffff;
		} else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) {
			ret = -EINVAL;
			goto out;
		}

		if (info->read_mmr_reg.count > 128) {
			ret = -EINVAL;
			goto out;
		}

		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
		if (!regs) {
			ret = -ENOMEM;
			goto out;
		}

		alloc_size = info->read_mmr_reg.count * sizeof(*regs);

		amdgpu_gfx_off_ctrl(adev, false);
		for (i = 0; i < info->read_mmr_reg.count; i++) {
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("unallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				amdgpu_gfx_off_ctrl(adev, true);
				ret = -EFAULT;
				goto out;
			}
		}
		amdgpu_gfx_off_ctrl(adev, true);
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		ret = (n ? -EFAULT : 0);
out:
		up_read(&adev->reset_domain->sem);
		return ret;
	}
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device *dev_info;
		uint64_t vm_size;
		uint32_t pcie_gen_mask;
		int ret;

		dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
		if (!dev_info)
			return -ENOMEM;

		dev_info->device_id = adev->pdev->device;
		dev_info->chip_rev = adev->rev_id;
		dev_info->external_rev = adev->external_rev_id;
		dev_info->pci_rev = adev->pdev->revision;
		dev_info->family = adev->family;
		dev_info->num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in KHz */
		dev_info->gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled) {
			dev_info->max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
			dev_info->max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
			dev_info->min_engine_clock = amdgpu_dpm_get_sclk(adev, true) * 10;
			dev_info->min_memory_clock = amdgpu_dpm_get_mclk(adev, true) * 10;
		} else {
			dev_info->max_engine_clock =
				dev_info->min_engine_clock =
					adev->clock.default_sclk * 10;
			dev_info->max_memory_clock =
				dev_info->min_memory_clock =
					adev->clock.default_mclk * 10;
		}
		dev_info->enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info->num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info->num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
		dev_info->ids_flags = 0;
		if (adev->flags & AMD_IS_APU)
			dev_info->ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		if (adev->gfx.mcbp)
			dev_info->ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
		if (amdgpu_is_tmz(adev))
			dev_info->ids_flags |= AMDGPU_IDS_FLAGS_TMZ;
		if (adev->gfx.config.ta_cntl2_truncate_coord_mode)
			dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD;

		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		vm_size -= AMDGPU_VA_RESERVED_SIZE;

		/* Older VCE FW versions are buggy and can handle only 40 bits */
		if (adev->vce.fw_version &&
		    adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
			vm_size = min(vm_size, 1ULL << 40);

		dev_info->virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info->virtual_address_max =
			min(vm_size, AMDGPU_GMC_HOLE_START);

		if (vm_size > AMDGPU_GMC_HOLE_START) {
			dev_info->high_va_offset = AMDGPU_GMC_HOLE_END;
			dev_info->high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
		}
		dev_info->virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info->pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
		dev_info->gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info->cu_active_number = adev->gfx.cu_info.number;
		dev_info->cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
		dev_info->ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info->cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
		memcpy(&dev_info->cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
		       sizeof(dev_info->cu_bitmap));
		dev_info->vram_type = adev->gmc.vram_type;
		dev_info->vram_bit_width = adev->gmc.vram_width;
		dev_info->vce_harvest_config = adev->vce.harvest_config;
		dev_info->gc_double_offchip_lds_buf =
			adev->gfx.config.double_offchip_lds_buf;
		dev_info->wave_front_size = adev->gfx.cu_info.wave_front_size;
		dev_info->num_shader_visible_vgprs = adev->gfx.config.max_gprs;
		dev_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
		dev_info->num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
		dev_info->gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
		dev_info->gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
		dev_info->max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

		if (adev->family >= AMDGPU_FAMILY_NV)
			dev_info->pa_sc_tile_steering_override =
				adev->gfx.config.pa_sc_tile_steering_override;

		dev_info->tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;

		/* Combine the chip gen mask with the platform (CPU/mobo) mask. */
		pcie_gen_mask = adev->pm.pcie_gen_mask & (adev->pm.pcie_gen_mask >> 16);
		dev_info->pcie_gen = fls(pcie_gen_mask);
		dev_info->pcie_num_lanes =
			adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 ? 32 :
			adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 ? 16 :
			adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 ? 12 :
			adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 ? 8 :
			adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 ? 4 :
			adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 ? 2 : 1;

		dev_info->tcp_cache_size = adev->gfx.config.gc_tcp_l1_size;
		dev_info->num_sqc_per_wgp = adev->gfx.config.gc_num_sqc_per_wgp;
		dev_info->sqc_data_cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc;
		dev_info->sqc_inst_cache_size = adev->gfx.config.gc_l1_instruction_cache_size_per_sqc;
		dev_info->gl1c_cache_size = adev->gfx.config.gc_gl1c_size_per_instance *
					    adev->gfx.config.gc_gl1c_per_sa;
		dev_info->gl2c_cache_size = adev->gfx.config.gc_gl2c_per_gpu;
		dev_info->mall_size = adev->gmc.mall_size;

		if (adev->gfx.funcs->get_gfx_shadow_info) {
			struct amdgpu_gfx_shadow_info shadow_info;

			ret = amdgpu_gfx_get_gfx_shadow_info(adev, &shadow_info);
			if (!ret) {
				dev_info->shadow_size = shadow_info.shadow_size;
				dev_info->shadow_alignment = shadow_info.shadow_alignment;
				dev_info->csa_size = shadow_info.csa_size;
				dev_info->csa_alignment = shadow_info.csa_alignment;
			}
		}

		ret = copy_to_user(out, dev_info,
				   min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0;
		kfree(dev_info);
		return ret;
	}
	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
		unsigned int i;
		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
		struct amd_vce_state *vce_state;

		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
			if (vce_state) {
				vce_clk_table.entries[i].sclk = vce_state->sclk;
				vce_clk_table.entries[i].mclk = vce_state->mclk;
				vce_clk_table.entries[i].eclk = vce_state->evclk;
				vce_clk_table.num_valid_entries++;
			}
		}

		return copy_to_user(out, &vce_clk_table,
				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VBIOS: {
		uint32_t bios_size = adev->bios_size;

		switch (info->vbios_info.type) {
		case AMDGPU_INFO_VBIOS_SIZE:
			return copy_to_user(out, &bios_size,
					    min((size_t)size, sizeof(bios_size)))
					    ? -EFAULT : 0;
		case AMDGPU_INFO_VBIOS_IMAGE: {
			uint8_t *bios;
			uint32_t bios_offset = info->vbios_info.offset;

			if (bios_offset >= bios_size)
				return -EINVAL;

			bios = adev->bios + bios_offset;
			return copy_to_user(out, bios,
					    min((size_t)size, (size_t)(bios_size - bios_offset)))
					    ? -EFAULT : 0;
		}
		case AMDGPU_INFO_VBIOS_INFO: {
			struct drm_amdgpu_info_vbios vbios_info = {};
			struct atom_context *atom_context;

			atom_context = adev->mode_info.atom_context;
			if (atom_context) {
				memcpy(vbios_info.name, atom_context->name,
				       sizeof(atom_context->name));
				memcpy(vbios_info.vbios_pn, atom_context->vbios_pn,
				       sizeof(atom_context->vbios_pn));
				vbios_info.version = atom_context->version;
				memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
				       sizeof(atom_context->vbios_ver_str));
				memcpy(vbios_info.date, atom_context->date,
				       sizeof(atom_context->date));
			}

			return copy_to_user(out, &vbios_info,
					    min((size_t)size, sizeof(vbios_info))) ? -EFAULT : 0;
		}
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->vbios_info.type);
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_NUM_HANDLES: {
		struct drm_amdgpu_info_num_handles handle;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_UVD:
			/* Starting with Polaris, we support unlimited UVD handles */
			if (adev->asic_type < CHIP_POLARIS10) {
				handle.uvd_max_handles = adev->uvd.max_handles;
				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

				return copy_to_user(out, &handle,
						    min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
			} else {
				return -ENODATA;
			}

			break;
		default:
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_SENSOR: {
		if (!adev->pm.dpm_enabled)
			return -ENOENT;

		switch (info->sensor_info.type) {
		case AMDGPU_INFO_SENSOR_GFX_SCLK:
			/* get sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GFX_MCLK:
			/* get mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GPU_TEMP:
			/* get temperature in millidegrees C */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_TEMP,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_LOAD:
			/* get GPU load */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_LOAD,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
			/* get average GPU power */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_AVG_POWER,
						   (void *)&ui32, &ui32_size)) {
				/* fall back to input power for backwards compat */
				if (amdgpu_dpm_read_sensor(adev,
							   AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
							   (void *)&ui32, &ui32_size)) {
					return -EINVAL;
				}
			}
			ui32 >>= 8;
			break;
		case AMDGPU_INFO_SENSOR_VDDNB:
			/* get VDDNB in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDNB,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_VDDGFX:
			/* get VDDGFX in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDGFX,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
			/* get stable pstate sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
			/* get stable pstate mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_SCLK:
			/* get peak pstate sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_MCLK:
			/* get peak pstate mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->sensor_info.type);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_LOST_COUNTER:
		ui32 = atomic_read(&adev->vram_lost_counter);
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
		uint64_t ras_mask;

		if (!ras)
			return -EINVAL;
		ras_mask = (uint64_t)adev->ras_enabled << 32 | ras->features;

		return copy_to_user(out, &ras_mask,
				    min_t(u64, size, sizeof(ras_mask))) ?
				    -EFAULT : 0;
	}
	case AMDGPU_INFO_VIDEO_CAPS: {
		const struct amdgpu_video_codecs *codecs;
		struct drm_amdgpu_info_video_caps *caps;
		int r;

		if (!adev->asic_funcs->query_video_codecs)
			return -EINVAL;

		switch (info->video_cap.type) {
		case AMDGPU_INFO_VIDEO_CAPS_DECODE:
			r = amdgpu_asic_query_video_codecs(adev, false, &codecs);
			if (r)
				return -EINVAL;
			break;
		case AMDGPU_INFO_VIDEO_CAPS_ENCODE:
			r = amdgpu_asic_query_video_codecs(adev, true, &codecs);
			if (r)
				return -EINVAL;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->video_cap.type);
			return -EINVAL;
		}

		caps = kzalloc(sizeof(*caps), GFP_KERNEL);
		if (!caps)
			return -ENOMEM;

		for (i = 0; i < codecs->codec_count; i++) {
			int idx = codecs->codec_array[i].codec_type;

			switch (idx) {
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9:
			case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1:
				caps->codec_info[idx].valid = 1;
				caps->codec_info[idx].max_width =
					codecs->codec_array[i].max_width;
				caps->codec_info[idx].max_height =
					codecs->codec_array[i].max_height;
				caps->codec_info[idx].max_pixels_per_frame =
					codecs->codec_array[i].max_pixels_per_frame;
				caps->codec_info[idx].max_level =
					codecs->codec_array[i].max_level;
				break;
			default:
				break;
			}
		}
		r = copy_to_user(out, caps,
				 min((size_t)size, sizeof(*caps))) ? -EFAULT : 0;
		kfree(caps);
		return r;
	}
	case AMDGPU_INFO_MAX_IBS: {
		uint32_t max_ibs[AMDGPU_HW_IP_NUM];

		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
			max_ibs[i] = amdgpu_ring_max_ibs(i);

		return copy_to_user(out, max_ibs,
				    min((size_t)size, sizeof(max_ibs))) ? -EFAULT : 0;
	}
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv;
	int r, pasid;

	/* Ensure IB tests are run on ring */
	flush_delayed_work(&adev->delayed_init_work);

	if (amdgpu_ras_intr_triggered()) {
		DRM_ERROR("RAS Intr triggered, device disabled!!");
		return -EHWPOISON;
	}

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		goto pm_put;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	r = amdgpu_xcp_open_device(adev, fpriv, file_priv);
	if (r)
		goto error_pasid;

	r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id);
	if (r)
		goto error_pasid;

	r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);
	if (r)
		goto error_vm;

	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
	if (!fpriv->prt_va) {
		r = -ENOMEM;
		goto error_vm;
	}

	if (adev->gfx.mcbp) {
		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;

		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
					  &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
		if (r)
			goto error_vm;
	}

	mutex_init(&fpriv->bo_list_lock);
	idr_init_base(&fpriv->bo_list_handles, 1);

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr, adev);

	file_priv->driver_priv = fpriv;
	goto out_suspend;

error_vm:
	amdgpu_vm_fini(adev, &fpriv->vm);

error_pasid:
	if (pasid) {
		amdgpu_pasid_free(pasid);
		amdgpu_vm_set_pasid(adev, &fpriv->vm, 0);
	}

	kfree(fpriv);

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
pm_put:
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}

/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	u32 pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
		amdgpu_uvd_free_handles(adev, file_priv);
	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
		amdgpu_vce_free_handles(adev, file_priv);

	if (fpriv->csa_va) {
		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;

		WARN_ON(amdgpu_unmap_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
						fpriv->csa_va, csa_addr));
		fpriv->csa_va = NULL;
	}

	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.bo);
	if (!WARN_ON(amdgpu_bo_reserve(pd, true))) {
		amdgpu_vm_bo_del(adev, fpriv->prt_va);
		amdgpu_bo_unreserve(pd);
	}

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
	amdgpu_vm_fini(adev, &fpriv->vm);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_put(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

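/**
 * amdgpu_driver_release_kms - drm callback for final device release
 *
 * @dev: drm dev pointer
 *
 * Tears down the remaining software state of the device and clears the
 * PCI driver data once the last reference to the drm device is dropped.
 */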
void amdgpu_driver_release_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	amdgpu_device_fini_sw(adev);
	pci_set_drvdata(adev->pdev, NULL);
}

/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @crtc: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int vpos, hpos, stat;
	u32 count;

	if (pipe >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (adev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = amdgpu_display_vblank_get_counter(adev, pipe);
			/* Ask amdgpu_display_get_crtc_scanoutpos to return
			 * vpos as distance to start of vblank, instead of
			 * regular vertical scanout pos.
			 */
			stat = amdgpu_display_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&adev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}

/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw;
	struct atom_context *ctx = adev->mode_info.atom_context;
	uint8_t smu_program, smu_major, smu_minor, smu_debug;
	int ret, i;

	static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = {
#define TA_FW_NAME(type) [TA_FW_TYPE_PSP_##type] = #type
		TA_FW_NAME(XGMI),
		TA_FW_NAME(RAS),
		TA_FW_NAME(HDCP),
		TA_FW_NAME(DTM),
		TA_FW_NAME(RAP),
		TA_FW_NAME(SECUREDISPLAY),
#undef TA_FW_NAME
	};

	/* VCE */
	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* UVD */
	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* GMC */
	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* ME */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PFP */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CE */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST CNTL */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST GPM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST SRM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLCP */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLCP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLCV */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCV;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLCV feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC2 */
	if (adev->gfx.mec2_fw) {
		query_fw.index = 1;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	/* IMU */
	query_fw.fw_type = AMDGPU_INFO_FW_IMU;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "IMU feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP SOS */
	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP ASD */
	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_TA;
	for (i = TA_FW_TYPE_PSP_XGMI; i < TA_FW_TYPE_MAX_INDEX; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			continue;

		seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
			   ta_fw_name[i], fw_info.feature, fw_info.ver);
	}

	/* SMC */
	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	smu_program = (fw_info.ver >> 24) & 0xff;
	smu_major = (fw_info.ver >> 16) & 0xff;
	smu_minor = (fw_info.ver >> 8) & 0xff;
	smu_debug = (fw_info.ver >> 0) & 0xff;
	seq_printf(m, "SMC feature version: %u, program: %d, firmware version: 0x%08x (%d.%d.%d)\n",
		   fw_info.feature, smu_program, fw_info.ver, smu_major, smu_minor, smu_debug);

	/* SDMA */
	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
			   i, fw_info.feature, fw_info.ver);
	}

	/* VCN */
	query_fw.fw_type = AMDGPU_INFO_FW_VCN;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCU */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCUB */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCUB;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCUB feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* TOC */
	query_fw.fw_type = AMDGPU_INFO_FW_TOC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "TOC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CAP */
	if (adev->psp.cap_fw) {
		query_fw.fw_type = AMDGPU_INFO_FW_CAP;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "CAP feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	/* MES_KIQ */
	query_fw.fw_type = AMDGPU_INFO_FW_MES_KIQ;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MES_KIQ feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MES */
	query_fw.fw_type = AMDGPU_INFO_FW_MES;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MES feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_pn);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_firmware_info);

#endif

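/*
 * With CONFIG_DEBUG_FS enabled, the file registered below is typically
 * readable at /sys/kernel/debug/dri/<minor>/amdgpu_firmware_info, e.g.
 * `cat /sys/kernel/debug/dri/0/amdgpu_firmware_info` (the minor number
 * depends on the system).
 */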
void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_firmware_info", 0444, root,
			    adev, &amdgpu_debugfs_firmware_info_fops);
#endif
}