1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  */
25 
26 #include <linux/kthread.h>
27 #include <linux/pci.h>
28 #include <linux/uaccess.h>
29 #include <linux/pm_runtime.h>
30 
31 #include <drm/drm_debugfs.h>
32 
33 #include "amdgpu.h"
34 #include "amdgpu_pm.h"
35 #include "amdgpu_dm_debugfs.h"
36 #include "amdgpu_ras.h"
37 
38 /**
39  * amdgpu_debugfs_add_files - Add simple debugfs entries
40  *
 * @adev: Device to attach the debugfs entries to
 * @files: Array of drm_info_list entries (name plus show callback)
 * @nfiles: Number of entries to register
 *
 * Returns 0 on success or -EINVAL if the component limit is reached.
 *
45  */
46 int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
47 			     const struct drm_info_list *files,
48 			     unsigned nfiles)
49 {
50 	unsigned i;
51 
52 	for (i = 0; i < adev->debugfs_count; i++) {
53 		if (adev->debugfs[i].files == files) {
54 			/* Already registered */
55 			return 0;
56 		}
57 	}
58 
59 	i = adev->debugfs_count + 1;
60 	if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
61 		DRM_ERROR("Reached maximum number of debugfs components.\n");
62 		DRM_ERROR("Report so we increase "
63 			  "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
64 		return -EINVAL;
65 	}
66 	adev->debugfs[adev->debugfs_count].files = files;
67 	adev->debugfs[adev->debugfs_count].num_files = nfiles;
68 	adev->debugfs_count = i;
69 #if defined(CONFIG_DEBUG_FS)
70 	drm_debugfs_create_files(files, nfiles,
71 				 adev->ddev->primary->debugfs_root,
72 				 adev->ddev->primary);
73 #endif
74 	return 0;
75 }
76 
77 #if defined(CONFIG_DEBUG_FS)
78 
79 /**
80  * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
81  *
82  * @read: True if reading
83  * @f: open file handle
84  * @buf: User buffer to write/read to
85  * @size: Number of bytes to write/read
86  * @pos:  Offset to seek to
87  *
 * This debugfs entry assigns special meaning to the offset being
 * sought.  Various bits have different meanings:
 *
 * Bit 62:  Indicates a GRBM bank switch is needed
 * Bit 61:  Indicates a SRBM bank switch is needed (implies bit 62 is zero)
 * Bits 24..33: The SE or ME selector if needed
 * Bits 34..43: The SH (or SA) or PIPE selector if needed
 * Bits 44..53: The INSTANCE (or CU/WGP) or QUEUE selector if needed
 *
 * Bit 23:  Indicates that the PM power gating lock should be held.
 *	    This is necessary to read registers that might be
 *	    unreliable during a power gating transition.
101  *
102  * The lower bits are the BYTE offset of the register to read.  This
103  * allows reading multiple registers in a single call and having
104  * the returned size reflect that.
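 *
 * Example (hypothetical userspace sketch, assuming a file descriptor
 * open on the amdgpu_regs entry and a libc with a 64-bit off_t): read
 * one register at byte offset 0x100 on SE 1 / SH 0 / instance 0 via a
 * GRBM bank switch:
 *
 *	uint64_t off = (1ULL << 62) |	// GRBM bank switch
 *		       (1ULL << 24) |	// SE selector = 1
 *		       (0ULL << 34) |	// SH selector = 0
 *		       (0ULL << 44) |	// INSTANCE selector = 0
 *		       0x100;		// byte offset of the register
 *	uint32_t value;
 *	pread(fd, &value, sizeof(value), off);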
105  */
static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
107 		char __user *buf, size_t size, loff_t *pos)
108 {
109 	struct amdgpu_device *adev = file_inode(f)->i_private;
110 	ssize_t result = 0;
111 	int r;
112 	bool pm_pg_lock, use_bank, use_ring;
113 	unsigned instance_bank, sh_bank, se_bank, me, pipe, queue, vmid;
114 
115 	pm_pg_lock = use_bank = use_ring = false;
116 	instance_bank = sh_bank = se_bank = me = pipe = queue = vmid = 0;
117 
118 	if (size & 0x3 || *pos & 0x3 ||
119 			((*pos & (1ULL << 62)) && (*pos & (1ULL << 61))))
120 		return -EINVAL;
121 
122 	/* are we reading registers for which a PG lock is necessary? */
123 	pm_pg_lock = (*pos >> 23) & 1;
124 
125 	if (*pos & (1ULL << 62)) {
126 		se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
127 		sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
128 		instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
129 
130 		if (se_bank == 0x3FF)
131 			se_bank = 0xFFFFFFFF;
132 		if (sh_bank == 0x3FF)
133 			sh_bank = 0xFFFFFFFF;
134 		if (instance_bank == 0x3FF)
135 			instance_bank = 0xFFFFFFFF;
136 		use_bank = true;
137 	} else if (*pos & (1ULL << 61)) {
138 
139 		me = (*pos & GENMASK_ULL(33, 24)) >> 24;
140 		pipe = (*pos & GENMASK_ULL(43, 34)) >> 34;
141 		queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
142 		vmid = (*pos & GENMASK_ULL(58, 54)) >> 54;
143 
144 		use_ring = true;
145 	} else {
146 		use_bank = use_ring = false;
147 	}
148 
149 	*pos &= (1UL << 22) - 1;
150 
	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		/* get_sync raised the usage count even on failure; drop it */
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}
154 
155 	if (use_bank) {
156 		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
157 		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
158 			pm_runtime_mark_last_busy(adev->ddev->dev);
159 			pm_runtime_put_autosuspend(adev->ddev->dev);
160 			return -EINVAL;
161 		}
162 		mutex_lock(&adev->grbm_idx_mutex);
163 		amdgpu_gfx_select_se_sh(adev, se_bank,
164 					sh_bank, instance_bank);
165 	} else if (use_ring) {
166 		mutex_lock(&adev->srbm_mutex);
167 		amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid);
168 	}
169 
170 	if (pm_pg_lock)
171 		mutex_lock(&adev->pm.mutex);
172 
173 	while (size) {
174 		uint32_t value;
175 
176 		if (read) {
177 			value = RREG32(*pos >> 2);
178 			r = put_user(value, (uint32_t *)buf);
179 		} else {
180 			r = get_user(value, (uint32_t *)buf);
181 			if (!r)
182 				amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0);
183 		}
184 		if (r) {
185 			result = r;
186 			goto end;
187 		}
188 
189 		result += 4;
190 		buf += 4;
191 		*pos += 4;
192 		size -= 4;
193 	}
194 
195 end:
196 	if (use_bank) {
197 		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
198 		mutex_unlock(&adev->grbm_idx_mutex);
199 	} else if (use_ring) {
200 		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0);
201 		mutex_unlock(&adev->srbm_mutex);
202 	}
203 
204 	if (pm_pg_lock)
205 		mutex_unlock(&adev->pm.mutex);
206 
207 	pm_runtime_mark_last_busy(adev->ddev->dev);
208 	pm_runtime_put_autosuspend(adev->ddev->dev);
209 
210 	return result;
211 }
212 
213 /**
214  * amdgpu_debugfs_regs_read - Callback for reading MMIO registers
215  */
216 static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
217 					size_t size, loff_t *pos)
218 {
219 	return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
220 }
221 
222 /**
223  * amdgpu_debugfs_regs_write - Callback for writing MMIO registers
224  */
225 static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
226 					 size_t size, loff_t *pos)
227 {
228 	return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
229 }
230 
231 
232 /**
233  * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register
234  *
235  * @f: open file handle
236  * @buf: User buffer to store read data in
237  * @size: Number of bytes to read
238  * @pos:  Offset to seek to
239  *
240  * The lower bits are the BYTE offset of the register to read.  This
241  * allows reading multiple registers in a single call and having
242  * the returned size reflect that.
243  */
244 static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
245 					size_t size, loff_t *pos)
246 {
247 	struct amdgpu_device *adev = file_inode(f)->i_private;
248 	ssize_t result = 0;
249 	int r;
250 
251 	if (size & 0x3 || *pos & 0x3)
252 		return -EINVAL;
253 
	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}
257 
258 	while (size) {
259 		uint32_t value;
260 
261 		value = RREG32_PCIE(*pos >> 2);
262 		r = put_user(value, (uint32_t *)buf);
263 		if (r) {
264 			pm_runtime_mark_last_busy(adev->ddev->dev);
265 			pm_runtime_put_autosuspend(adev->ddev->dev);
266 			return r;
267 		}
268 
269 		result += 4;
270 		buf += 4;
271 		*pos += 4;
272 		size -= 4;
273 	}
274 
275 	pm_runtime_mark_last_busy(adev->ddev->dev);
276 	pm_runtime_put_autosuspend(adev->ddev->dev);
277 
278 	return result;
279 }
280 
281 /**
282  * amdgpu_debugfs_regs_pcie_write - Write to a PCIE register
283  *
284  * @f: open file handle
285  * @buf: User buffer to write data from
286  * @size: Number of bytes to write
287  * @pos:  Offset to seek to
288  *
289  * The lower bits are the BYTE offset of the register to write.  This
290  * allows writing multiple registers in a single call and having
291  * the returned size reflect that.
292  */
293 static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
294 					 size_t size, loff_t *pos)
295 {
296 	struct amdgpu_device *adev = file_inode(f)->i_private;
297 	ssize_t result = 0;
298 	int r;
299 
300 	if (size & 0x3 || *pos & 0x3)
301 		return -EINVAL;
302 
	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}
306 
307 	while (size) {
308 		uint32_t value;
309 
310 		r = get_user(value, (uint32_t *)buf);
311 		if (r) {
312 			pm_runtime_mark_last_busy(adev->ddev->dev);
313 			pm_runtime_put_autosuspend(adev->ddev->dev);
314 			return r;
315 		}
316 
317 		WREG32_PCIE(*pos >> 2, value);
318 
319 		result += 4;
320 		buf += 4;
321 		*pos += 4;
322 		size -= 4;
323 	}
324 
325 	pm_runtime_mark_last_busy(adev->ddev->dev);
326 	pm_runtime_put_autosuspend(adev->ddev->dev);
327 
328 	return result;
329 }
330 
331 /**
332  * amdgpu_debugfs_regs_didt_read - Read from a DIDT register
333  *
334  * @f: open file handle
335  * @buf: User buffer to store read data in
336  * @size: Number of bytes to read
337  * @pos:  Offset to seek to
338  *
339  * The lower bits are the BYTE offset of the register to read.  This
340  * allows reading multiple registers in a single call and having
341  * the returned size reflect that.
342  */
343 static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
344 					size_t size, loff_t *pos)
345 {
346 	struct amdgpu_device *adev = file_inode(f)->i_private;
347 	ssize_t result = 0;
348 	int r;
349 
350 	if (size & 0x3 || *pos & 0x3)
351 		return -EINVAL;
352 
	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}
356 
357 	while (size) {
358 		uint32_t value;
359 
360 		value = RREG32_DIDT(*pos >> 2);
361 		r = put_user(value, (uint32_t *)buf);
362 		if (r) {
363 			pm_runtime_mark_last_busy(adev->ddev->dev);
364 			pm_runtime_put_autosuspend(adev->ddev->dev);
365 			return r;
366 		}
367 
368 		result += 4;
369 		buf += 4;
370 		*pos += 4;
371 		size -= 4;
372 	}
373 
374 	pm_runtime_mark_last_busy(adev->ddev->dev);
375 	pm_runtime_put_autosuspend(adev->ddev->dev);
376 
377 	return result;
378 }
379 
380 /**
381  * amdgpu_debugfs_regs_didt_write - Write to a DIDT register
382  *
383  * @f: open file handle
384  * @buf: User buffer to write data from
385  * @size: Number of bytes to write
386  * @pos:  Offset to seek to
387  *
388  * The lower bits are the BYTE offset of the register to write.  This
389  * allows writing multiple registers in a single call and having
390  * the returned size reflect that.
391  */
392 static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
393 					 size_t size, loff_t *pos)
394 {
395 	struct amdgpu_device *adev = file_inode(f)->i_private;
396 	ssize_t result = 0;
397 	int r;
398 
399 	if (size & 0x3 || *pos & 0x3)
400 		return -EINVAL;
401 
	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}
405 
406 	while (size) {
407 		uint32_t value;
408 
409 		r = get_user(value, (uint32_t *)buf);
410 		if (r) {
411 			pm_runtime_mark_last_busy(adev->ddev->dev);
412 			pm_runtime_put_autosuspend(adev->ddev->dev);
413 			return r;
414 		}
415 
416 		WREG32_DIDT(*pos >> 2, value);
417 
418 		result += 4;
419 		buf += 4;
420 		*pos += 4;
421 		size -= 4;
422 	}
423 
424 	pm_runtime_mark_last_busy(adev->ddev->dev);
425 	pm_runtime_put_autosuspend(adev->ddev->dev);
426 
427 	return result;
428 }
429 
430 /**
431  * amdgpu_debugfs_regs_smc_read - Read from a SMC register
432  *
433  * @f: open file handle
434  * @buf: User buffer to store read data in
435  * @size: Number of bytes to read
436  * @pos:  Offset to seek to
437  *
438  * The lower bits are the BYTE offset of the register to read.  This
439  * allows reading multiple registers in a single call and having
440  * the returned size reflect that.
441  */
442 static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
443 					size_t size, loff_t *pos)
444 {
445 	struct amdgpu_device *adev = file_inode(f)->i_private;
446 	ssize_t result = 0;
447 	int r;
448 
449 	if (size & 0x3 || *pos & 0x3)
450 		return -EINVAL;
451 
	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}
455 
456 	while (size) {
457 		uint32_t value;
458 
459 		value = RREG32_SMC(*pos);
460 		r = put_user(value, (uint32_t *)buf);
461 		if (r) {
462 			pm_runtime_mark_last_busy(adev->ddev->dev);
463 			pm_runtime_put_autosuspend(adev->ddev->dev);
464 			return r;
465 		}
466 
467 		result += 4;
468 		buf += 4;
469 		*pos += 4;
470 		size -= 4;
471 	}
472 
473 	pm_runtime_mark_last_busy(adev->ddev->dev);
474 	pm_runtime_put_autosuspend(adev->ddev->dev);
475 
476 	return result;
477 }
478 
479 /**
480  * amdgpu_debugfs_regs_smc_write - Write to a SMC register
481  *
482  * @f: open file handle
483  * @buf: User buffer to write data from
484  * @size: Number of bytes to write
485  * @pos:  Offset to seek to
486  *
487  * The lower bits are the BYTE offset of the register to write.  This
488  * allows writing multiple registers in a single call and having
489  * the returned size reflect that.
490  */
491 static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
492 					 size_t size, loff_t *pos)
493 {
494 	struct amdgpu_device *adev = file_inode(f)->i_private;
495 	ssize_t result = 0;
496 	int r;
497 
498 	if (size & 0x3 || *pos & 0x3)
499 		return -EINVAL;
500 
	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}
504 
505 	while (size) {
506 		uint32_t value;
507 
508 		r = get_user(value, (uint32_t *)buf);
509 		if (r) {
510 			pm_runtime_mark_last_busy(adev->ddev->dev);
511 			pm_runtime_put_autosuspend(adev->ddev->dev);
512 			return r;
513 		}
514 
515 		WREG32_SMC(*pos, value);
516 
517 		result += 4;
518 		buf += 4;
519 		*pos += 4;
520 		size -= 4;
521 	}
522 
523 	pm_runtime_mark_last_busy(adev->ddev->dev);
524 	pm_runtime_put_autosuspend(adev->ddev->dev);
525 
526 	return result;
527 }
528 
529 /**
530  * amdgpu_debugfs_gca_config_read - Read from gfx config data
531  *
532  * @f: open file handle
533  * @buf: User buffer to store read data in
534  * @size: Number of bytes to read
535  * @pos:  Offset to seek to
536  *
537  * This file is used to access configuration data in a somewhat
538  * stable fashion.  The format is a series of DWORDs with the first
539  * indicating which revision it is.  New content is appended to the
540  * end so that older software can still read the data.
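 *
 * Example (hypothetical userspace sketch): the first DWORD is the
 * format revision and the second is max_shader_engines:
 *
 *	uint32_t cfg[2];
 *	pread(fd, cfg, sizeof(cfg), 0);	// cfg[0] = revision, cfg[1] = max SEs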
541  */
542 
543 static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
544 					size_t size, loff_t *pos)
545 {
546 	struct amdgpu_device *adev = file_inode(f)->i_private;
547 	ssize_t result = 0;
548 	int r;
549 	uint32_t *config, no_regs = 0;
550 
551 	if (size & 0x3 || *pos & 0x3)
552 		return -EINVAL;
553 
554 	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
555 	if (!config)
556 		return -ENOMEM;
557 
558 	/* version, increment each time something is added */
559 	config[no_regs++] = 3;
560 	config[no_regs++] = adev->gfx.config.max_shader_engines;
561 	config[no_regs++] = adev->gfx.config.max_tile_pipes;
562 	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
563 	config[no_regs++] = adev->gfx.config.max_sh_per_se;
564 	config[no_regs++] = adev->gfx.config.max_backends_per_se;
565 	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
566 	config[no_regs++] = adev->gfx.config.max_gprs;
567 	config[no_regs++] = adev->gfx.config.max_gs_threads;
568 	config[no_regs++] = adev->gfx.config.max_hw_contexts;
569 	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
570 	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
571 	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
572 	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
573 	config[no_regs++] = adev->gfx.config.num_tile_pipes;
574 	config[no_regs++] = adev->gfx.config.backend_enable_mask;
575 	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
576 	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
577 	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
578 	config[no_regs++] = adev->gfx.config.num_gpus;
579 	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
580 	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
581 	config[no_regs++] = adev->gfx.config.gb_addr_config;
582 	config[no_regs++] = adev->gfx.config.num_rbs;
583 
584 	/* rev==1 */
585 	config[no_regs++] = adev->rev_id;
586 	config[no_regs++] = adev->pg_flags;
587 	config[no_regs++] = adev->cg_flags;
588 
589 	/* rev==2 */
590 	config[no_regs++] = adev->family;
591 	config[no_regs++] = adev->external_rev_id;
592 
593 	/* rev==3 */
594 	config[no_regs++] = adev->pdev->device;
595 	config[no_regs++] = adev->pdev->revision;
596 	config[no_regs++] = adev->pdev->subsystem_device;
597 	config[no_regs++] = adev->pdev->subsystem_vendor;
598 
599 	while (size && (*pos < no_regs * 4)) {
600 		uint32_t value;
601 
602 		value = config[*pos >> 2];
603 		r = put_user(value, (uint32_t *)buf);
604 		if (r) {
605 			kfree(config);
606 			return r;
607 		}
608 
609 		result += 4;
610 		buf += 4;
611 		*pos += 4;
612 		size -= 4;
613 	}
614 
615 	kfree(config);
616 	return result;
617 }
618 
619 /**
620  * amdgpu_debugfs_sensor_read - Read from the powerplay sensors
621  *
622  * @f: open file handle
623  * @buf: User buffer to store read data in
624  * @size: Number of bytes to read
625  * @pos:  Offset to seek to
626  *
627  * The offset is treated as the BYTE address of one of the sensors
628  * enumerated in amd/include/kgd_pp_interface.h under the
629  * 'amd_pp_sensors' enumeration.  For instance to read the UVD VCLK
630  * you would use the offset 3 * 4 = 12.
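 *
 * A hypothetical userspace sketch of that UVD VCLK read:
 *
 *	uint32_t vclk;	// one DWORD; some sensors return more
 *	pread(fd, &vclk, sizeof(vclk), AMDGPU_PP_SENSOR_UVD_VCLK * 4);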
631  */
632 static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
633 					size_t size, loff_t *pos)
634 {
635 	struct amdgpu_device *adev = file_inode(f)->i_private;
636 	int idx, x, outsize, r, valuesize;
637 	uint32_t values[16];
638 
639 	if (size & 3 || *pos & 0x3)
640 		return -EINVAL;
641 
642 	if (!adev->pm.dpm_enabled)
643 		return -EINVAL;
644 
645 	/* convert offset to sensor number */
646 	idx = *pos >> 2;
647 
648 	valuesize = sizeof(values);
649 
	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}
653 
654 	r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
655 
656 	pm_runtime_mark_last_busy(adev->ddev->dev);
657 	pm_runtime_put_autosuspend(adev->ddev->dev);
658 
659 	if (r)
660 		return r;
661 
662 	if (size > valuesize)
663 		return -EINVAL;
664 
	/* r is known to be zero here; copy the values out, stopping on fault */
	outsize = 0;
	x = 0;
	while (size) {
		r = put_user(values[x++], (int32_t *)buf);
		if (r)
			return r;
		buf += 4;
		size -= 4;
		outsize += 4;
	}

	return outsize;
677 }
678 
/**
 * amdgpu_debugfs_wave_read - Read WAVE STATUS data
680  *
681  * @f: open file handle
682  * @buf: User buffer to store read data in
683  * @size: Number of bytes to read
684  * @pos:  Offset to seek to
685  *
 * The offset being sought selects the wave for which the status data
 * will be returned.  The bits are used as follows:
688  *
689  * Bits 0..6: 	Byte offset into data
690  * Bits 7..14:	SE selector
691  * Bits 15..22:	SH/SA selector
692  * Bits 23..30: CU/{WGP+SIMD} selector
693  * Bits 31..36: WAVE ID selector
694  * Bits 37..44: SIMD ID selector
695  *
 * The returned data begins with one DWORD of version information,
 * followed by the WAVE STATUS registers relevant to the GFX IP version
 * being used.  See gfx_v8_0_read_wave_data() for an example output.
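 *
 * Example (hypothetical userspace sketch): fetch the version DWORD for
 * wave 0 / SIMD 0 on SE 0 / SH 0 / CU 1:
 *
 *	uint64_t off = (0ULL << 7) |	// SE selector
 *		       (0ULL << 15) |	// SH selector
 *		       (1ULL << 23) |	// CU selector
 *		       (0ULL << 31) |	// wave ID
 *		       (0ULL << 37);	// SIMD ID
 *	uint32_t version;
 *	pread(fd, &version, sizeof(version), off);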
699  */
700 static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
701 					size_t size, loff_t *pos)
702 {
703 	struct amdgpu_device *adev = f->f_inode->i_private;
704 	int r, x;
	ssize_t result = 0;
706 	uint32_t offset, se, sh, cu, wave, simd, data[32];
707 
708 	if (size & 3 || *pos & 3)
709 		return -EINVAL;
710 
711 	/* decode offset */
712 	offset = (*pos & GENMASK_ULL(6, 0));
713 	se = (*pos & GENMASK_ULL(14, 7)) >> 7;
714 	sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
715 	cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
716 	wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
717 	simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
718 
	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}
722 
723 	/* switch to the specific se/sh/cu */
724 	mutex_lock(&adev->grbm_idx_mutex);
725 	amdgpu_gfx_select_se_sh(adev, se, sh, cu);
726 
727 	x = 0;
728 	if (adev->gfx.funcs->read_wave_data)
729 		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
730 
731 	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
732 	mutex_unlock(&adev->grbm_idx_mutex);
733 
734 	pm_runtime_mark_last_busy(adev->ddev->dev);
735 	pm_runtime_put_autosuspend(adev->ddev->dev);
736 
737 	if (!x)
738 		return -EINVAL;
739 
740 	while (size && (offset < x * 4)) {
741 		uint32_t value;
742 
743 		value = data[offset >> 2];
744 		r = put_user(value, (uint32_t *)buf);
745 		if (r)
746 			return r;
747 
748 		result += 4;
749 		buf += 4;
750 		offset += 4;
751 		size -= 4;
752 	}
753 
754 	return result;
755 }
756 
/**
 * amdgpu_debugfs_gpr_read - Read wave gprs
758  *
759  * @f: open file handle
760  * @buf: User buffer to store read data in
761  * @size: Number of bytes to read
762  * @pos:  Offset to seek to
763  *
 * The offset being sought selects the wave for which the GPR data
 * will be returned.  The bits are used as follows:
766  *
767  * Bits 0..11:	Byte offset into data
768  * Bits 12..19:	SE selector
769  * Bits 20..27:	SH/SA selector
770  * Bits 28..35: CU/{WGP+SIMD} selector
771  * Bits 36..43: WAVE ID selector
 * Bits 44..51: SIMD ID selector
773  * Bits 52..59: Thread selector
774  * Bits 60..61: Bank selector (VGPR=0,SGPR=1)
775  *
776  * The return data comes from the SGPR or VGPR register bank for
777  * the selected operational unit.
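 *
 * Example (hypothetical userspace sketch): read the first 16 SGPRs of
 * wave 0 / SIMD 0 on SE 0 / SH 0 / CU 0:
 *
 *	uint64_t off = (1ULL << 60);	// bank selector: 1 = SGPR
 *	uint32_t sgprs[16];
 *	pread(fd, sgprs, sizeof(sgprs), off);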
778  */
779 static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
780 					size_t size, loff_t *pos)
781 {
782 	struct amdgpu_device *adev = f->f_inode->i_private;
783 	int r;
784 	ssize_t result = 0;
785 	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
786 
787 	if (size > 4096 || size & 3 || *pos & 3)
788 		return -EINVAL;
789 
790 	/* decode offset */
791 	offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
792 	se = (*pos & GENMASK_ULL(19, 12)) >> 12;
793 	sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
794 	cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
795 	wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
796 	simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
797 	thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
798 	bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
799 
800 	data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
801 	if (!data)
802 		return -ENOMEM;
803 
	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		/* drop the usage count raised by get_sync and free the buffer */
		pm_runtime_put_autosuspend(adev->ddev->dev);
		kfree(data);
		return r;
	}
807 
808 	/* switch to the specific se/sh/cu */
809 	mutex_lock(&adev->grbm_idx_mutex);
810 	amdgpu_gfx_select_se_sh(adev, se, sh, cu);
811 
812 	if (bank == 0) {
813 		if (adev->gfx.funcs->read_wave_vgprs)
814 			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
815 	} else {
816 		if (adev->gfx.funcs->read_wave_sgprs)
817 			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
818 	}
819 
820 	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
821 	mutex_unlock(&adev->grbm_idx_mutex);
822 
823 	pm_runtime_mark_last_busy(adev->ddev->dev);
824 	pm_runtime_put_autosuspend(adev->ddev->dev);
825 
826 	while (size) {
827 		uint32_t value;
828 
829 		value = data[result >> 2];
830 		r = put_user(value, (uint32_t *)buf);
831 		if (r) {
832 			result = r;
833 			goto err;
834 		}
835 
836 		result += 4;
837 		buf += 4;
838 		size -= 4;
839 	}
840 
841 err:
842 	kfree(data);
843 	return result;
844 }
845 
846 /**
 * amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF
848  *
849  * @f: open file handle
850  * @buf: User buffer to write data from
851  * @size: Number of bytes to write
852  * @pos:  Offset to seek to
853  *
854  * Write a 32-bit zero to disable or a 32-bit non-zero to enable
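 *
 * A hypothetical userspace sketch that disables GFXOFF:
 *
 *	uint32_t v = 0;	// 0 disables GFXOFF, any non-zero value enables it
 *	write(fd, &v, sizeof(v));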
855  */
856 static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *buf,
857 					 size_t size, loff_t *pos)
858 {
859 	struct amdgpu_device *adev = file_inode(f)->i_private;
860 	ssize_t result = 0;
861 	int r;
862 
863 	if (size & 0x3 || *pos & 0x3)
864 		return -EINVAL;
865 
	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}
869 
870 	while (size) {
871 		uint32_t value;
872 
873 		r = get_user(value, (uint32_t *)buf);
874 		if (r) {
875 			pm_runtime_mark_last_busy(adev->ddev->dev);
876 			pm_runtime_put_autosuspend(adev->ddev->dev);
877 			return r;
878 		}
879 
		amdgpu_gfx_off_ctrl(adev, value != 0);
881 
882 		result += 4;
883 		buf += 4;
884 		*pos += 4;
885 		size -= 4;
886 	}
887 
888 	pm_runtime_mark_last_busy(adev->ddev->dev);
889 	pm_runtime_put_autosuspend(adev->ddev->dev);
890 
891 	return result;
892 }
893 
894 
895 static const struct file_operations amdgpu_debugfs_regs_fops = {
896 	.owner = THIS_MODULE,
897 	.read = amdgpu_debugfs_regs_read,
898 	.write = amdgpu_debugfs_regs_write,
899 	.llseek = default_llseek
900 };
901 static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
902 	.owner = THIS_MODULE,
903 	.read = amdgpu_debugfs_regs_didt_read,
904 	.write = amdgpu_debugfs_regs_didt_write,
905 	.llseek = default_llseek
906 };
907 static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
908 	.owner = THIS_MODULE,
909 	.read = amdgpu_debugfs_regs_pcie_read,
910 	.write = amdgpu_debugfs_regs_pcie_write,
911 	.llseek = default_llseek
912 };
913 static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
914 	.owner = THIS_MODULE,
915 	.read = amdgpu_debugfs_regs_smc_read,
916 	.write = amdgpu_debugfs_regs_smc_write,
917 	.llseek = default_llseek
918 };
919 
920 static const struct file_operations amdgpu_debugfs_gca_config_fops = {
921 	.owner = THIS_MODULE,
922 	.read = amdgpu_debugfs_gca_config_read,
923 	.llseek = default_llseek
924 };
925 
926 static const struct file_operations amdgpu_debugfs_sensors_fops = {
927 	.owner = THIS_MODULE,
928 	.read = amdgpu_debugfs_sensor_read,
929 	.llseek = default_llseek
930 };
931 
932 static const struct file_operations amdgpu_debugfs_wave_fops = {
933 	.owner = THIS_MODULE,
934 	.read = amdgpu_debugfs_wave_read,
935 	.llseek = default_llseek
936 };
937 static const struct file_operations amdgpu_debugfs_gpr_fops = {
938 	.owner = THIS_MODULE,
939 	.read = amdgpu_debugfs_gpr_read,
940 	.llseek = default_llseek
941 };
942 
943 static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
944 	.owner = THIS_MODULE,
945 	.write = amdgpu_debugfs_gfxoff_write,
946 };
947 
948 static const struct file_operations *debugfs_regs[] = {
949 	&amdgpu_debugfs_regs_fops,
950 	&amdgpu_debugfs_regs_didt_fops,
951 	&amdgpu_debugfs_regs_pcie_fops,
952 	&amdgpu_debugfs_regs_smc_fops,
953 	&amdgpu_debugfs_gca_config_fops,
954 	&amdgpu_debugfs_sensors_fops,
955 	&amdgpu_debugfs_wave_fops,
956 	&amdgpu_debugfs_gpr_fops,
957 	&amdgpu_debugfs_gfxoff_fops,
958 };
959 
960 static const char *debugfs_regs_names[] = {
961 	"amdgpu_regs",
962 	"amdgpu_regs_didt",
963 	"amdgpu_regs_pcie",
964 	"amdgpu_regs_smc",
965 	"amdgpu_gca_config",
966 	"amdgpu_sensors",
967 	"amdgpu_wave",
968 	"amdgpu_gpr",
969 	"amdgpu_gfxoff",
970 };
971 
972 /**
 * amdgpu_debugfs_regs_init - Initialize debugfs entries that provide
 *			      register access
975  *
976  * @adev: The device to attach the debugfs entries to
977  */
978 int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
979 {
980 	struct drm_minor *minor = adev->ddev->primary;
981 	struct dentry *ent, *root = minor->debugfs_root;
982 	unsigned int i;
983 
984 	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
985 		ent = debugfs_create_file(debugfs_regs_names[i],
986 					  S_IFREG | S_IRUGO, root,
987 					  adev, debugfs_regs[i]);
988 		if (!i && !IS_ERR_OR_NULL(ent))
989 			i_size_write(ent->d_inode, adev->rmmio_size);
990 		adev->debugfs_regs[i] = ent;
991 	}
992 
993 	return 0;
994 }
995 
996 static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
997 {
998 	struct drm_info_node *node = (struct drm_info_node *) m->private;
999 	struct drm_device *dev = node->minor->dev;
1000 	struct amdgpu_device *adev = dev->dev_private;
1001 	int r = 0, i;
1002 
	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}
1006 
	/* Avoid accidentally unparking the sched thread during GPU reset */
1008 	mutex_lock(&adev->lock_reset);
1009 
1010 	/* hold on the scheduler */
1011 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
1012 		struct amdgpu_ring *ring = adev->rings[i];
1013 
1014 		if (!ring || !ring->sched.thread)
1015 			continue;
1016 		kthread_park(ring->sched.thread);
1017 	}
1018 
1019 	seq_printf(m, "run ib test:\n");
1020 	r = amdgpu_ib_ring_tests(adev);
1021 	if (r)
1022 		seq_printf(m, "ib ring tests failed (%d).\n", r);
1023 	else
1024 		seq_printf(m, "ib ring tests passed.\n");
1025 
1026 	/* go on the scheduler */
1027 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
1028 		struct amdgpu_ring *ring = adev->rings[i];
1029 
1030 		if (!ring || !ring->sched.thread)
1031 			continue;
1032 		kthread_unpark(ring->sched.thread);
1033 	}
1034 
1035 	mutex_unlock(&adev->lock_reset);
1036 
1037 	pm_runtime_mark_last_busy(dev->dev);
1038 	pm_runtime_put_autosuspend(dev->dev);
1039 
1040 	return 0;
1041 }
1042 
1043 static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
1044 {
1045 	struct drm_info_node *node = (struct drm_info_node *) m->private;
1046 	struct drm_device *dev = node->minor->dev;
1047 	struct amdgpu_device *adev = dev->dev_private;
1048 
1049 	seq_write(m, adev->bios, adev->bios_size);
1050 	return 0;
1051 }
1052 
1053 static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
1054 {
1055 	struct drm_info_node *node = (struct drm_info_node *)m->private;
1056 	struct drm_device *dev = node->minor->dev;
1057 	struct amdgpu_device *adev = dev->dev_private;
1058 	int r;
1059 
	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}
1063 
1064 	seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));
1065 
1066 	pm_runtime_mark_last_busy(dev->dev);
1067 	pm_runtime_put_autosuspend(dev->dev);
1068 
1069 	return 0;
1070 }
1071 
1072 static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
1073 {
1074 	struct drm_info_node *node = (struct drm_info_node *)m->private;
1075 	struct drm_device *dev = node->minor->dev;
1076 	struct amdgpu_device *adev = dev->dev_private;
1077 	int r;
1078 
	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}
1082 
1083 	seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT));
1084 
1085 	pm_runtime_mark_last_busy(dev->dev);
1086 	pm_runtime_put_autosuspend(dev->dev);
1087 
1088 	return 0;
1089 }
1090 
1091 static const struct drm_info_list amdgpu_debugfs_list[] = {
1092 	{"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump},
1093 	{"amdgpu_test_ib", &amdgpu_debugfs_test_ib},
1094 	{"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram},
1095 	{"amdgpu_evict_gtt", &amdgpu_debugfs_evict_gtt},
1096 };
1097 
1098 static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring,
1099 					  struct dma_fence **fences)
1100 {
1101 	struct amdgpu_fence_driver *drv = &ring->fence_drv;
1102 	uint32_t sync_seq, last_seq;
1103 
1104 	last_seq = atomic_read(&ring->fence_drv.last_seq);
1105 	sync_seq = ring->fence_drv.sync_seq;
1106 
1107 	last_seq &= drv->num_fences_mask;
1108 	sync_seq &= drv->num_fences_mask;
1109 
1110 	do {
1111 		struct dma_fence *fence, **ptr;
1112 
1113 		++last_seq;
1114 		last_seq &= drv->num_fences_mask;
1115 		ptr = &drv->fences[last_seq];
1116 
1117 		fence = rcu_dereference_protected(*ptr, 1);
1118 		RCU_INIT_POINTER(*ptr, NULL);
1119 
1120 		if (!fence)
1121 			continue;
1122 
1123 		fences[last_seq] = fence;
1124 
1125 	} while (last_seq != sync_seq);
1126 }
1127 
1128 static void amdgpu_ib_preempt_signal_fences(struct dma_fence **fences,
1129 					    int length)
1130 {
1131 	int i;
1132 	struct dma_fence *fence;
1133 
1134 	for (i = 0; i < length; i++) {
1135 		fence = fences[i];
1136 		if (!fence)
1137 			continue;
1138 		dma_fence_signal(fence);
1139 		dma_fence_put(fence);
1140 	}
1141 }
1142 
1143 static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
1144 {
1145 	struct drm_sched_job *s_job;
1146 	struct dma_fence *fence;
1147 
1148 	spin_lock(&sched->job_list_lock);
1149 	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
1150 		fence = sched->ops->run_job(s_job);
1151 		dma_fence_put(fence);
1152 	}
1153 	spin_unlock(&sched->job_list_lock);
1154 }
1155 
1156 static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
1157 {
1158 	struct amdgpu_job *job;
1159 	struct drm_sched_job *s_job;
1160 	uint32_t preempt_seq;
1161 	struct dma_fence *fence, **ptr;
1162 	struct amdgpu_fence_driver *drv = &ring->fence_drv;
1163 	struct drm_gpu_scheduler *sched = &ring->sched;
1164 
1165 	if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
1166 		return;
1167 
1168 	preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
1169 	if (preempt_seq <= atomic_read(&drv->last_seq))
1170 		return;
1171 
1172 	preempt_seq &= drv->num_fences_mask;
1173 	ptr = &drv->fences[preempt_seq];
1174 	fence = rcu_dereference_protected(*ptr, 1);
1175 
1176 	spin_lock(&sched->job_list_lock);
1177 	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
1178 		job = to_amdgpu_job(s_job);
1179 		if (job->fence == fence)
1180 			/* mark the job as preempted */
1181 			job->preemption_status |= AMDGPU_IB_PREEMPTED;
1182 	}
1183 	spin_unlock(&sched->job_list_lock);
1184 }
1185 
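/*
 * amdgpu_debugfs_ib_preempt - debugfs trigger for mid-command-buffer
 * preemption on a single ring.
 *
 * A hypothetical shell usage sketch (ring numbering varies by ASIC;
 * the gfx ring is commonly index 0):
 *
 *	echo 0 > /sys/kernel/debug/dri/0/amdgpu_preempt_ib
 */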
1186 static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
1187 {
1188 	int r, resched, length;
1189 	struct amdgpu_ring *ring;
1190 	struct dma_fence **fences = NULL;
1191 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
1192 
1193 	if (val >= AMDGPU_MAX_RINGS)
1194 		return -EINVAL;
1195 
1196 	ring = adev->rings[val];
1197 
1198 	if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread)
1199 		return -EINVAL;
1200 
1201 	/* the last preemption failed */
1202 	if (ring->trail_seq != le32_to_cpu(*ring->trail_fence_cpu_addr))
1203 		return -EBUSY;
1204 
1205 	length = ring->fence_drv.num_fences_mask + 1;
1206 	fences = kcalloc(length, sizeof(void *), GFP_KERNEL);
1207 	if (!fences)
1208 		return -ENOMEM;
1209 
	/* Avoid accidentally unparking the sched thread during GPU reset */
1211 	mutex_lock(&adev->lock_reset);
1212 
1213 	/* stop the scheduler */
1214 	kthread_park(ring->sched.thread);
1215 
1216 	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
1217 
1218 	/* preempt the IB */
1219 	r = amdgpu_ring_preempt_ib(ring);
1220 	if (r) {
1221 		DRM_WARN("failed to preempt ring %d\n", ring->idx);
1222 		goto failure;
1223 	}
1224 
1225 	amdgpu_fence_process(ring);
1226 
1227 	if (atomic_read(&ring->fence_drv.last_seq) !=
1228 	    ring->fence_drv.sync_seq) {
1229 		DRM_INFO("ring %d was preempted\n", ring->idx);
1230 
1231 		amdgpu_ib_preempt_mark_partial_job(ring);
1232 
1233 		/* swap out the old fences */
1234 		amdgpu_ib_preempt_fences_swap(ring, fences);
1235 
1236 		amdgpu_fence_driver_force_completion(ring);
1237 
1238 		/* resubmit unfinished jobs */
1239 		amdgpu_ib_preempt_job_recovery(&ring->sched);
1240 
1241 		/* wait for jobs finished */
1242 		amdgpu_fence_wait_empty(ring);
1243 
1244 		/* signal the old fences */
1245 		amdgpu_ib_preempt_signal_fences(fences, length);
1246 	}
1247 
1248 failure:
1249 	/* restart the scheduler */
1250 	kthread_unpark(ring->sched.thread);
1251 
1252 	mutex_unlock(&adev->lock_reset);
1253 
1254 	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
1255 
1256 	kfree(fences);
1257 
1258 	return 0;
1259 }
1260 
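/*
 * amdgpu_debugfs_sclk_set - force a fixed gfx clock through the SMU.
 *
 * A hypothetical shell usage sketch (on sw-SMU parts the value is a
 * frequency in MHz; other parts ignore the write):
 *
 *	echo 1000 > /sys/kernel/debug/dri/0/amdgpu_force_sclk
 */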
1261 static int amdgpu_debugfs_sclk_set(void *data, u64 val)
1262 {
1263 	int ret = 0;
1264 	uint32_t max_freq, min_freq;
1265 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
1266 
1267 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1268 		return -EINVAL;
1269 
	ret = pm_runtime_get_sync(adev->ddev->dev);
	if (ret < 0) {
		/* get_sync raised the usage count even on failure; drop it */
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return ret;
	}

	/* fall through to the put below on every path to avoid leaking the
	 * runtime PM reference */
	ret = 0;
	if (is_support_sw_smu(adev)) {
		ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq, true);
		if (ret || val > max_freq || val < min_freq)
			ret = -EINVAL;
		else
			ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK,
						      (uint32_t)val, (uint32_t)val, true);
	}
1282 
1283 	pm_runtime_mark_last_busy(adev->ddev->dev);
1284 	pm_runtime_put_autosuspend(adev->ddev->dev);
1285 
1286 	if (ret)
1287 		return -EINVAL;
1288 
1289 	return 0;
1290 }
1291 
1292 DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL,
1293 			amdgpu_debugfs_ib_preempt, "%llu\n");
1294 
1295 DEFINE_SIMPLE_ATTRIBUTE(fops_sclk_set, NULL,
1296 			amdgpu_debugfs_sclk_set, "%llu\n");
1297 
1298 int amdgpu_debugfs_init(struct amdgpu_device *adev)
1299 {
1300 	int r, i;
1301 
1302 	adev->debugfs_preempt =
1303 		debugfs_create_file("amdgpu_preempt_ib", 0600,
1304 				    adev->ddev->primary->debugfs_root, adev,
1305 				    &fops_ib_preempt);
1306 	if (!(adev->debugfs_preempt)) {
1307 		DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
1308 		return -EIO;
1309 	}
1310 
1311 	adev->smu.debugfs_sclk =
1312 		debugfs_create_file("amdgpu_force_sclk", 0200,
1313 				    adev->ddev->primary->debugfs_root, adev,
1314 				    &fops_sclk_set);
1315 	if (!(adev->smu.debugfs_sclk)) {
1316 		DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
1317 		return -EIO;
1318 	}
1319 
1320 	/* Register debugfs entries for amdgpu_ttm */
1321 	r = amdgpu_ttm_debugfs_init(adev);
1322 	if (r) {
1323 		DRM_ERROR("Failed to init debugfs\n");
1324 		return r;
1325 	}
1326 
1327 	r = amdgpu_debugfs_pm_init(adev);
1328 	if (r) {
1329 		DRM_ERROR("Failed to register debugfs file for dpm!\n");
1330 		return r;
1331 	}
1332 
	if (amdgpu_debugfs_sa_init(adev))
		dev_err(adev->dev, "failed to register debugfs file for SA\n");
1336 
1337 	if (amdgpu_debugfs_fence_init(adev))
1338 		dev_err(adev->dev, "fence debugfs file creation failed\n");
1339 
1340 	r = amdgpu_debugfs_gem_init(adev);
1341 	if (r)
1342 		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
1343 
1344 	r = amdgpu_debugfs_regs_init(adev);
1345 	if (r)
1346 		DRM_ERROR("registering register debugfs failed (%d).\n", r);
1347 
1348 	r = amdgpu_debugfs_firmware_init(adev);
1349 	if (r)
1350 		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
1351 
1352 #if defined(CONFIG_DRM_AMD_DC)
1353 	if (amdgpu_device_has_dc_support(adev)) {
1354 		if (dtn_debugfs_init(adev))
1355 			DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
1356 	}
1357 #endif
1358 
1359 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1360 		struct amdgpu_ring *ring = adev->rings[i];
1361 
1362 		if (!ring)
1363 			continue;
1364 
		if (amdgpu_debugfs_ring_init(adev, ring))
			DRM_ERROR("Failed to register debugfs file for rings!\n");
1368 	}
1369 
1370 	amdgpu_ras_debugfs_create_all(adev);
1371 
1372 	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
1373 					ARRAY_SIZE(amdgpu_debugfs_list));
1374 }
1375 
1376 #else
1377 int amdgpu_debugfs_init(struct amdgpu_device *adev)
1378 {
1379 	return 0;
1380 }
1381 int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
1382 {
1383 	return 0;
1384 }
1385 #endif
1386