1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  */
25 
26 #include <linux/kthread.h>
27 #include <linux/pci.h>
28 #include <linux/uaccess.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/poll.h>
31 #include <drm/drm_debugfs.h>
32 
33 #include "amdgpu.h"
34 #include "amdgpu_pm.h"
35 #include "amdgpu_dm_debugfs.h"
36 #include "amdgpu_ras.h"
37 #include "amdgpu_rap.h"
38 #include "amdgpu_securedisplay.h"
39 #include "amdgpu_fw_attestation.h"
40 
41 /**
42  * amdgpu_debugfs_add_files - Add simple debugfs entries
43  *
44  * @adev:  Device to attach debugfs entries to
45  * @files:  Array of function callbacks that respond to reads
46  * @nfiles: Number of callbacks to register
47  *
48  */
49 int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
50 			     const struct drm_info_list *files,
51 			     unsigned nfiles)
52 {
53 	unsigned i;
54 
55 	for (i = 0; i < adev->debugfs_count; i++) {
56 		if (adev->debugfs[i].files == files) {
57 			/* Already registered */
58 			return 0;
59 		}
60 	}
61 
62 	i = adev->debugfs_count + 1;
63 	if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
64 		DRM_ERROR("Reached maximum number of debugfs components.\n");
65 		DRM_ERROR("Report so we increase "
66 			  "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
67 		return -EINVAL;
68 	}
69 	adev->debugfs[adev->debugfs_count].files = files;
70 	adev->debugfs[adev->debugfs_count].num_files = nfiles;
71 	adev->debugfs_count = i;
72 #if defined(CONFIG_DEBUG_FS)
73 	drm_debugfs_create_files(files, nfiles,
74 				 adev_to_drm(adev)->primary->debugfs_root,
75 				 adev_to_drm(adev)->primary);
76 #endif
77 	return 0;
78 }
79 
/*
 * Wake any autodump listener on a GPU hang, then block (up to 10 minutes)
 * until it signals that the dump is complete, so GPU recovery does not
 * destroy state the client is still capturing.
 */
int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned long timeout = 600 * HZ;
	int ret;

	/* Notify pollers on the amdgpu_autodump node that the GPU hung. */
	wake_up_interruptible(&adev->autodump.gpu_hang);

	ret = wait_for_completion_interruptible_timeout(&adev->autodump.dumping, timeout);
	if (ret == 0) {
		pr_err("autodump: timeout, move on to gpu recovery\n");
		return -ETIMEDOUT;
	}
	/* NOTE(review): ret < 0 (wait interrupted by a signal) falls through
	 * and is reported as success — confirm this is intentional.
	 */
#endif
	return 0;
}
96 
97 #if defined(CONFIG_DEBUG_FS)
98 
99 static int amdgpu_debugfs_autodump_open(struct inode *inode, struct file *file)
100 {
101 	struct amdgpu_device *adev = inode->i_private;
102 	int ret;
103 
104 	file->private_data = adev;
105 
106 	ret = down_read_killable(&adev->reset_sem);
107 	if (ret)
108 		return ret;
109 
110 	if (adev->autodump.dumping.done) {
111 		reinit_completion(&adev->autodump.dumping);
112 		ret = 0;
113 	} else {
114 		ret = -EBUSY;
115 	}
116 
117 	up_read(&adev->reset_sem);
118 
119 	return ret;
120 }
121 
static int amdgpu_debugfs_autodump_release(struct inode *inode, struct file *file)
{
	struct amdgpu_device *adev = file->private_data;

	/* Closing the node signals that the dump is finished, releasing
	 * anyone blocked in amdgpu_debugfs_wait_dump(). */
	complete_all(&adev->autodump.dumping);
	return 0;
}
129 
130 static unsigned int amdgpu_debugfs_autodump_poll(struct file *file, struct poll_table_struct *poll_table)
131 {
132 	struct amdgpu_device *adev = file->private_data;
133 
134 	poll_wait(file, &adev->autodump.gpu_hang, poll_table);
135 
136 	if (amdgpu_in_reset(adev))
137 		return POLLIN | POLLRDNORM | POLLWRNORM;
138 
139 	return 0;
140 }
141 
/* File operations for the amdgpu_autodump debugfs node. */
static const struct file_operations autodump_debug_fops = {
	.owner = THIS_MODULE,
	.open = amdgpu_debugfs_autodump_open,
	.poll = amdgpu_debugfs_autodump_poll,
	.release = amdgpu_debugfs_autodump_release,
};
148 
149 static void amdgpu_debugfs_autodump_init(struct amdgpu_device *adev)
150 {
151 	init_completion(&adev->autodump.dumping);
152 	complete_all(&adev->autodump.dumping);
153 	init_waitqueue_head(&adev->autodump.gpu_hang);
154 
155 	debugfs_create_file("amdgpu_autodump", 0600,
156 		adev_to_drm(adev)->primary->debugfs_root,
157 		adev, &autodump_debug_fops);
158 }
159 
160 /**
161  * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
162  *
163  * @read: True if reading
164  * @f: open file handle
165  * @buf: User buffer to write/read to
166  * @size: Number of bytes to write/read
167  * @pos:  Offset to seek to
168  *
169  * This debugfs entry has special meaning on the offset being sought.
170  * Various bits have different meanings:
171  *
172  * Bit 62:  Indicates a GRBM bank switch is needed
173  * Bit 61:  Indicates a SRBM bank switch is needed (implies bit 62 is
174  * 	    zero)
175  * Bits 24..33: The SE or ME selector if needed
176  * Bits 34..43: The SH (or SA) or PIPE selector if needed
177  * Bits 44..53: The INSTANCE (or CU/WGP) or QUEUE selector if needed
178  *
179  * Bit 23:  Indicates that the PM power gating lock should be held
180  * 	    This is necessary to read registers that might be
181  * 	    unreliable during a power gating transistion.
182  *
183  * The lower bits are the BYTE offset of the register to read.  This
184  * allows reading multiple registers in a single call and having
185  * the returned size reflect that.
186  */
static int  amdgpu_debugfs_process_reg_op(bool read, struct file *f,
		char __user *buf, size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank, use_ring;
	unsigned instance_bank, sh_bank, se_bank, me, pipe, queue, vmid;

	pm_pg_lock = use_bank = use_ring = false;
	instance_bank = sh_bank = se_bank = me = pipe = queue = vmid = 0;

	/* DWORD-sized, DWORD-aligned accesses only; GRBM (bit 62) and
	 * SRBM (bit 61) bank switching are mutually exclusive. */
	if (size & 0x3 || *pos & 0x3 ||
			((*pos & (1ULL << 62)) && (*pos & (1ULL << 61))))
		return -EINVAL;

	/* are we reading registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		/* GRBM bank switch: decode SE/SH/instance selectors.
		 * A selector of 0x3FF means "broadcast to all". */
		se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
		sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
		instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = true;
	} else if (*pos & (1ULL << 61)) {
		/* SRBM bank switch: decode ME/PIPE/QUEUE/VMID selectors. */
		me = (*pos & GENMASK_ULL(33, 24)) >> 24;
		pipe = (*pos & GENMASK_ULL(43, 34)) >> 34;
		queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
		vmid = (*pos & GENMASK_ULL(58, 54)) >> 54;

		use_ring = true;
	} else {
		use_bank = use_ring = false;
	}

	/* strip the selector bits; keep only the register byte offset */
	*pos &= (1UL << 22) - 1;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		/* get_sync raises the usage count even on failure */
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	if (use_bank) {
		/* validate bank selectors against hardware limits */
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return -EINVAL;
		}
		/* hold grbm_idx_mutex for the whole banked access */
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	} else if (use_ring) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	/* transfer one DWORD per iteration; *pos advances so the register
	 * offset (in DWORDs) is always *pos >> 2 */
	while (size) {
		uint32_t value;

		if (read) {
			value = RREG32(*pos >> 2);
			r = put_user(value, (uint32_t *)buf);
		} else {
			r = get_user(value, (uint32_t *)buf);
			if (!r)
				amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value);
		}
		if (r) {
			result = r;
			goto end;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	/* restore broadcast selection before dropping the locks */
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	} else if (use_ring) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
303 
304 /*
305  * amdgpu_debugfs_regs_read - Callback for reading MMIO registers
306  */
307 static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
308 					size_t size, loff_t *pos)
309 {
310 	return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
311 }
312 
313 /*
314  * amdgpu_debugfs_regs_write - Callback for writing MMIO registers
315  */
316 static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
317 					 size_t size, loff_t *pos)
318 {
319 	return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
320 }
321 
322 
323 /**
324  * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register
325  *
326  * @f: open file handle
327  * @buf: User buffer to store read data in
328  * @size: Number of bytes to read
329  * @pos:  Offset to seek to
330  *
331  * The lower bits are the BYTE offset of the register to read.  This
332  * allows reading multiple registers in a single call and having
333  * the returned size reflect that.
334  */
335 static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
336 					size_t size, loff_t *pos)
337 {
338 	struct amdgpu_device *adev = file_inode(f)->i_private;
339 	ssize_t result = 0;
340 	int r;
341 
342 	if (size & 0x3 || *pos & 0x3)
343 		return -EINVAL;
344 
345 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
346 	if (r < 0) {
347 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
348 		return r;
349 	}
350 
351 	r = amdgpu_virt_enable_access_debugfs(adev);
352 	if (r < 0) {
353 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
354 		return r;
355 	}
356 
357 	while (size) {
358 		uint32_t value;
359 
360 		value = RREG32_PCIE(*pos);
361 		r = put_user(value, (uint32_t *)buf);
362 		if (r) {
363 			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
364 			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
365 			amdgpu_virt_disable_access_debugfs(adev);
366 			return r;
367 		}
368 
369 		result += 4;
370 		buf += 4;
371 		*pos += 4;
372 		size -= 4;
373 	}
374 
375 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
376 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
377 
378 	amdgpu_virt_disable_access_debugfs(adev);
379 	return result;
380 }
381 
382 /**
383  * amdgpu_debugfs_regs_pcie_write - Write to a PCIE register
384  *
385  * @f: open file handle
386  * @buf: User buffer to write data from
387  * @size: Number of bytes to write
388  * @pos:  Offset to seek to
389  *
390  * The lower bits are the BYTE offset of the register to write.  This
391  * allows writing multiple registers in a single call and having
392  * the returned size reflect that.
393  */
394 static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
395 					 size_t size, loff_t *pos)
396 {
397 	struct amdgpu_device *adev = file_inode(f)->i_private;
398 	ssize_t result = 0;
399 	int r;
400 
401 	if (size & 0x3 || *pos & 0x3)
402 		return -EINVAL;
403 
404 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
405 	if (r < 0) {
406 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
407 		return r;
408 	}
409 
410 	r = amdgpu_virt_enable_access_debugfs(adev);
411 	if (r < 0) {
412 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
413 		return r;
414 	}
415 
416 	while (size) {
417 		uint32_t value;
418 
419 		r = get_user(value, (uint32_t *)buf);
420 		if (r) {
421 			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
422 			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
423 			amdgpu_virt_disable_access_debugfs(adev);
424 			return r;
425 		}
426 
427 		WREG32_PCIE(*pos, value);
428 
429 		result += 4;
430 		buf += 4;
431 		*pos += 4;
432 		size -= 4;
433 	}
434 
435 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
436 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
437 
438 	amdgpu_virt_disable_access_debugfs(adev);
439 	return result;
440 }
441 
442 /**
443  * amdgpu_debugfs_regs_didt_read - Read from a DIDT register
444  *
445  * @f: open file handle
446  * @buf: User buffer to store read data in
447  * @size: Number of bytes to read
448  * @pos:  Offset to seek to
449  *
450  * The lower bits are the BYTE offset of the register to read.  This
451  * allows reading multiple registers in a single call and having
452  * the returned size reflect that.
453  */
454 static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
455 					size_t size, loff_t *pos)
456 {
457 	struct amdgpu_device *adev = file_inode(f)->i_private;
458 	ssize_t result = 0;
459 	int r;
460 
461 	if (size & 0x3 || *pos & 0x3)
462 		return -EINVAL;
463 
464 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
465 	if (r < 0) {
466 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
467 		return r;
468 	}
469 
470 	r = amdgpu_virt_enable_access_debugfs(adev);
471 	if (r < 0) {
472 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
473 		return r;
474 	}
475 
476 	while (size) {
477 		uint32_t value;
478 
479 		value = RREG32_DIDT(*pos >> 2);
480 		r = put_user(value, (uint32_t *)buf);
481 		if (r) {
482 			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
483 			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
484 			amdgpu_virt_disable_access_debugfs(adev);
485 			return r;
486 		}
487 
488 		result += 4;
489 		buf += 4;
490 		*pos += 4;
491 		size -= 4;
492 	}
493 
494 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
495 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
496 
497 	amdgpu_virt_disable_access_debugfs(adev);
498 	return result;
499 }
500 
501 /**
502  * amdgpu_debugfs_regs_didt_write - Write to a DIDT register
503  *
504  * @f: open file handle
505  * @buf: User buffer to write data from
506  * @size: Number of bytes to write
507  * @pos:  Offset to seek to
508  *
509  * The lower bits are the BYTE offset of the register to write.  This
510  * allows writing multiple registers in a single call and having
511  * the returned size reflect that.
512  */
513 static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
514 					 size_t size, loff_t *pos)
515 {
516 	struct amdgpu_device *adev = file_inode(f)->i_private;
517 	ssize_t result = 0;
518 	int r;
519 
520 	if (size & 0x3 || *pos & 0x3)
521 		return -EINVAL;
522 
523 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
524 	if (r < 0) {
525 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
526 		return r;
527 	}
528 
529 	r = amdgpu_virt_enable_access_debugfs(adev);
530 	if (r < 0) {
531 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
532 		return r;
533 	}
534 
535 	while (size) {
536 		uint32_t value;
537 
538 		r = get_user(value, (uint32_t *)buf);
539 		if (r) {
540 			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
541 			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
542 			amdgpu_virt_disable_access_debugfs(adev);
543 			return r;
544 		}
545 
546 		WREG32_DIDT(*pos >> 2, value);
547 
548 		result += 4;
549 		buf += 4;
550 		*pos += 4;
551 		size -= 4;
552 	}
553 
554 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
555 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
556 
557 	amdgpu_virt_disable_access_debugfs(adev);
558 	return result;
559 }
560 
561 /**
562  * amdgpu_debugfs_regs_smc_read - Read from a SMC register
563  *
564  * @f: open file handle
565  * @buf: User buffer to store read data in
566  * @size: Number of bytes to read
567  * @pos:  Offset to seek to
568  *
569  * The lower bits are the BYTE offset of the register to read.  This
570  * allows reading multiple registers in a single call and having
571  * the returned size reflect that.
572  */
573 static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
574 					size_t size, loff_t *pos)
575 {
576 	struct amdgpu_device *adev = file_inode(f)->i_private;
577 	ssize_t result = 0;
578 	int r;
579 
580 	if (size & 0x3 || *pos & 0x3)
581 		return -EINVAL;
582 
583 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
584 	if (r < 0) {
585 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
586 		return r;
587 	}
588 
589 	r = amdgpu_virt_enable_access_debugfs(adev);
590 	if (r < 0) {
591 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
592 		return r;
593 	}
594 
595 	while (size) {
596 		uint32_t value;
597 
598 		value = RREG32_SMC(*pos);
599 		r = put_user(value, (uint32_t *)buf);
600 		if (r) {
601 			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
602 			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
603 			amdgpu_virt_disable_access_debugfs(adev);
604 			return r;
605 		}
606 
607 		result += 4;
608 		buf += 4;
609 		*pos += 4;
610 		size -= 4;
611 	}
612 
613 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
614 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
615 
616 	amdgpu_virt_disable_access_debugfs(adev);
617 	return result;
618 }
619 
620 /**
621  * amdgpu_debugfs_regs_smc_write - Write to a SMC register
622  *
623  * @f: open file handle
624  * @buf: User buffer to write data from
625  * @size: Number of bytes to write
626  * @pos:  Offset to seek to
627  *
628  * The lower bits are the BYTE offset of the register to write.  This
629  * allows writing multiple registers in a single call and having
630  * the returned size reflect that.
631  */
632 static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
633 					 size_t size, loff_t *pos)
634 {
635 	struct amdgpu_device *adev = file_inode(f)->i_private;
636 	ssize_t result = 0;
637 	int r;
638 
639 	if (size & 0x3 || *pos & 0x3)
640 		return -EINVAL;
641 
642 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
643 	if (r < 0) {
644 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
645 		return r;
646 	}
647 
648 	r = amdgpu_virt_enable_access_debugfs(adev);
649 	if (r < 0) {
650 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
651 		return r;
652 	}
653 
654 	while (size) {
655 		uint32_t value;
656 
657 		r = get_user(value, (uint32_t *)buf);
658 		if (r) {
659 			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
660 			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
661 			amdgpu_virt_disable_access_debugfs(adev);
662 			return r;
663 		}
664 
665 		WREG32_SMC(*pos, value);
666 
667 		result += 4;
668 		buf += 4;
669 		*pos += 4;
670 		size -= 4;
671 	}
672 
673 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
674 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
675 
676 	amdgpu_virt_disable_access_debugfs(adev);
677 	return result;
678 }
679 
680 /**
681  * amdgpu_debugfs_gca_config_read - Read from gfx config data
682  *
683  * @f: open file handle
684  * @buf: User buffer to store read data in
685  * @size: Number of bytes to read
686  * @pos:  Offset to seek to
687  *
688  * This file is used to access configuration data in a somewhat
689  * stable fashion.  The format is a series of DWORDs with the first
690  * indicating which revision it is.  New content is appended to the
691  * end so that older software can still read the data.
692  */
693 
static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	/* Whole, DWORD-aligned reads only. */
	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* 256 entries is generous headroom over the ~33 emitted below. */
	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 3;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = adev->pg_flags;
	config[no_regs++] = adev->cg_flags;

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	/* rev==3 */
	config[no_regs++] = adev->pdev->device;
	config[no_regs++] = adev->pdev->revision;
	config[no_regs++] = adev->pdev->subsystem_device;
	config[no_regs++] = adev->pdev->subsystem_vendor;

	/* copy out only as much as was actually populated; reads past the
	 * end simply return fewer bytes (old tools keep working) */
	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}
769 
770 /**
771  * amdgpu_debugfs_sensor_read - Read from the powerplay sensors
772  *
773  * @f: open file handle
774  * @buf: User buffer to store read data in
775  * @size: Number of bytes to read
776  * @pos:  Offset to seek to
777  *
778  * The offset is treated as the BYTE address of one of the sensors
779  * enumerated in amd/include/kgd_pp_interface.h under the
780  * 'amd_pp_sensors' enumeration.  For instance to read the UVD VCLK
781  * you would use the offset 3 * 4 = 12.
782  */
static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int idx, x, outsize, r, valuesize;
	uint32_t values[16];

	/* Whole, DWORD-aligned reads only. */
	if (size & 3 || *pos & 0x3)
		return -EINVAL;

	/* sensors are only reachable through the dpm interface */
	if (!adev->pm.dpm_enabled)
		return -EINVAL;

	/* convert offset to sensor number */
	idx = *pos >> 2;

	valuesize = sizeof(values);

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* valuesize is updated to the byte count the sensor produced */
	r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r) {
		amdgpu_virt_disable_access_debugfs(adev);
		return r;
	}

	/* cannot ask for more data than the sensor provided */
	if (size > valuesize) {
		amdgpu_virt_disable_access_debugfs(adev);
		return -EINVAL;
	}

	outsize = 0;
	x = 0;
	if (!r) {
		while (size) {
			/* NOTE(review): a put_user fault does not break the
			 * loop; only the final r is reported below — confirm
			 * this best-effort behavior is intended. */
			r = put_user(values[x++], (int32_t *)buf);
			buf += 4;
			size -= 4;
			outsize += 4;
		}
	}

	amdgpu_virt_disable_access_debugfs(adev);
	return !r ? outsize : r;
}
842 
843 /** amdgpu_debugfs_wave_read - Read WAVE STATUS data
844  *
845  * @f: open file handle
846  * @buf: User buffer to store read data in
847  * @size: Number of bytes to read
848  * @pos:  Offset to seek to
849  *
850  * The offset being sought changes which wave that the status data
851  * will be returned for.  The bits are used as follows:
852  *
853  * Bits 0..6: 	Byte offset into data
854  * Bits 7..14:	SE selector
855  * Bits 15..22:	SH/SA selector
856  * Bits 23..30: CU/{WGP+SIMD} selector
857  * Bits 31..36: WAVE ID selector
858  * Bits 37..44: SIMD ID selector
859  *
860  * The returned data begins with one DWORD of version information
861  * Followed by WAVE STATUS registers relevant to the GFX IP version
862  * being used.  See gfx_v8_0_read_wave_data() for an example output.
863  */
static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r, x;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	/* Whole, DWORD-aligned reads only. */
	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & GENMASK_ULL(6, 0));
	se = (*pos & GENMASK_ULL(14, 7)) >> 7;
	sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
	cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
	wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
	simd = (*pos & GENMASK_ULL(44, 37)) >> 37;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	/* x counts the DWORDs the GFX IP backend wrote into data[] */
	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);

	/* restore broadcast selection before releasing the mutex */
	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	/* nothing captured: no backend hook or an invalid wave selection */
	if (!x) {
		amdgpu_virt_disable_access_debugfs(adev);
		return -EINVAL;
	}

	/* copy out, clamped to the x DWORDs actually captured */
	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
933 
934 /** amdgpu_debugfs_gpr_read - Read wave gprs
935  *
936  * @f: open file handle
937  * @buf: User buffer to store read data in
938  * @size: Number of bytes to read
939  * @pos:  Offset to seek to
940  *
941  * The offset being sought changes which wave that the status data
942  * will be returned for.  The bits are used as follows:
943  *
944  * Bits 0..11:	Byte offset into data
945  * Bits 12..19:	SE selector
946  * Bits 20..27:	SH/SA selector
947  * Bits 28..35: CU/{WGP+SIMD} selector
948  * Bits 36..43: WAVE ID selector
 * Bits 44..51: SIMD ID selector
950  * Bits 52..59: Thread selector
951  * Bits 60..61: Bank selector (VGPR=0,SGPR=1)
952  *
953  * The return data comes from the SGPR or VGPR register bank for
954  * the selected operational unit.
955  */
956 static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
957 					size_t size, loff_t *pos)
958 {
959 	struct amdgpu_device *adev = f->f_inode->i_private;
960 	int r;
961 	ssize_t result = 0;
962 	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
963 
964 	if (size > 4096 || size & 3 || *pos & 3)
965 		return -EINVAL;
966 
967 	/* decode offset */
968 	offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
969 	se = (*pos & GENMASK_ULL(19, 12)) >> 12;
970 	sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
971 	cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
972 	wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
973 	simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
974 	thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
975 	bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
976 
977 	data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
978 	if (!data)
979 		return -ENOMEM;
980 
981 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
982 	if (r < 0)
983 		goto err;
984 
985 	r = amdgpu_virt_enable_access_debugfs(adev);
986 	if (r < 0)
987 		goto err;
988 
989 	/* switch to the specific se/sh/cu */
990 	mutex_lock(&adev->grbm_idx_mutex);
991 	amdgpu_gfx_select_se_sh(adev, se, sh, cu);
992 
993 	if (bank == 0) {
994 		if (adev->gfx.funcs->read_wave_vgprs)
995 			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
996 	} else {
997 		if (adev->gfx.funcs->read_wave_sgprs)
998 			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
999 	}
1000 
1001 	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
1002 	mutex_unlock(&adev->grbm_idx_mutex);
1003 
1004 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1005 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1006 
1007 	while (size) {
1008 		uint32_t value;
1009 
1010 		value = data[result >> 2];
1011 		r = put_user(value, (uint32_t *)buf);
1012 		if (r) {
1013 			amdgpu_virt_disable_access_debugfs(adev);
1014 			goto err;
1015 		}
1016 
1017 		result += 4;
1018 		buf += 4;
1019 		size -= 4;
1020 	}
1021 
1022 	kfree(data);
1023 	amdgpu_virt_disable_access_debugfs(adev);
1024 	return result;
1025 
1026 err:
1027 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1028 	kfree(data);
1029 	return r;
1030 }
1031 
1032 /**
1033  * amdgpu_debugfs_regs_gfxoff_write - Enable/disable GFXOFF
1034  *
1035  * @f: open file handle
1036  * @buf: User buffer to write data from
1037  * @size: Number of bytes to write
1038  * @pos:  Offset to seek to
1039  *
1040  * Write a 32-bit zero to disable or a 32-bit non-zero to enable
1041  */
1042 static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *buf,
1043 					 size_t size, loff_t *pos)
1044 {
1045 	struct amdgpu_device *adev = file_inode(f)->i_private;
1046 	ssize_t result = 0;
1047 	int r;
1048 
1049 	if (size & 0x3 || *pos & 0x3)
1050 		return -EINVAL;
1051 
1052 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1053 	if (r < 0) {
1054 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1055 		return r;
1056 	}
1057 
1058 	while (size) {
1059 		uint32_t value;
1060 
1061 		r = get_user(value, (uint32_t *)buf);
1062 		if (r) {
1063 			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1064 			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1065 			return r;
1066 		}
1067 
1068 		amdgpu_gfx_off_ctrl(adev, value ? true : false);
1069 
1070 		result += 4;
1071 		buf += 4;
1072 		*pos += 4;
1073 		size -= 4;
1074 	}
1075 
1076 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1077 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1078 
1079 	return result;
1080 }
1081 
1082 
1083 /**
1084  * amdgpu_debugfs_regs_gfxoff_status - read gfxoff status
1085  *
1086  * @f: open file handle
1087  * @buf: User buffer to store read data in
1088  * @size: Number of bytes to read
1089  * @pos:  Offset to seek to
1090  */
1091 static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
1092 					 size_t size, loff_t *pos)
1093 {
1094 	struct amdgpu_device *adev = file_inode(f)->i_private;
1095 	ssize_t result = 0;
1096 	int r;
1097 
1098 	if (size & 0x3 || *pos & 0x3)
1099 		return -EINVAL;
1100 
1101 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1102 	if (r < 0)
1103 		return r;
1104 
1105 	while (size) {
1106 		uint32_t value;
1107 
1108 		r = amdgpu_get_gfx_off_status(adev, &value);
1109 		if (r) {
1110 			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1111 			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1112 			return r;
1113 		}
1114 
1115 		r = put_user(value, (uint32_t *)buf);
1116 		if (r) {
1117 			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1118 			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1119 			return r;
1120 		}
1121 
1122 		result += 4;
1123 		buf += 4;
1124 		*pos += 4;
1125 		size -= 4;
1126 	}
1127 
1128 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1129 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1130 
1131 	return result;
1132 }
1133 
/* fops for the register-access debugfs files.  All of them use
 * default_llseek, so the file offset selects the register/dword offset
 * within the respective address space. */
static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

/* read-only: GCA configuration dump */
static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

/* read-only: hwmon-style sensor values */
static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

/* read-only: wavefront state / GPR dumps for shader debugging */
static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};

/* read/write: GFXOFF status query and enable/disable control */
static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gfxoff_read,
	.write = amdgpu_debugfs_gfxoff_write,
	.llseek = default_llseek
};
1188 
/* fops table for the register debugfs files.  Must stay index-aligned
 * with debugfs_regs_names[] below. */
static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
	&amdgpu_debugfs_gfxoff_fops,
};

/* file names for the entries in debugfs_regs[]; same order as above */
static const char *debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
	"amdgpu_gfxoff",
};
1212 
1213 /**
1214  * amdgpu_debugfs_regs_init -	Initialize debugfs entries that provide
1215  * 				register access.
1216  *
1217  * @adev: The device to attach the debugfs entries to
1218  */
1219 int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
1220 {
1221 	struct drm_minor *minor = adev_to_drm(adev)->primary;
1222 	struct dentry *ent, *root = minor->debugfs_root;
1223 	unsigned int i;
1224 
1225 	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
1226 		ent = debugfs_create_file(debugfs_regs_names[i],
1227 					  S_IFREG | S_IRUGO, root,
1228 					  adev, debugfs_regs[i]);
1229 		if (!i && !IS_ERR_OR_NULL(ent))
1230 			i_size_write(ent->d_inode, adev->rmmio_size);
1231 		adev->debugfs_regs[i] = ent;
1232 	}
1233 
1234 	return 0;
1235 }
1236 
1237 static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
1238 {
1239 	struct drm_info_node *node = (struct drm_info_node *) m->private;
1240 	struct drm_device *dev = node->minor->dev;
1241 	struct amdgpu_device *adev = drm_to_adev(dev);
1242 	int r = 0, i;
1243 
1244 	r = pm_runtime_get_sync(dev->dev);
1245 	if (r < 0) {
1246 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1247 		return r;
1248 	}
1249 
1250 	/* Avoid accidently unparking the sched thread during GPU reset */
1251 	r = down_read_killable(&adev->reset_sem);
1252 	if (r)
1253 		return r;
1254 
1255 	/* hold on the scheduler */
1256 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
1257 		struct amdgpu_ring *ring = adev->rings[i];
1258 
1259 		if (!ring || !ring->sched.thread)
1260 			continue;
1261 		kthread_park(ring->sched.thread);
1262 	}
1263 
1264 	seq_printf(m, "run ib test:\n");
1265 	r = amdgpu_ib_ring_tests(adev);
1266 	if (r)
1267 		seq_printf(m, "ib ring tests failed (%d).\n", r);
1268 	else
1269 		seq_printf(m, "ib ring tests passed.\n");
1270 
1271 	/* go on the scheduler */
1272 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
1273 		struct amdgpu_ring *ring = adev->rings[i];
1274 
1275 		if (!ring || !ring->sched.thread)
1276 			continue;
1277 		kthread_unpark(ring->sched.thread);
1278 	}
1279 
1280 	up_read(&adev->reset_sem);
1281 
1282 	pm_runtime_mark_last_busy(dev->dev);
1283 	pm_runtime_put_autosuspend(dev->dev);
1284 
1285 	return 0;
1286 }
1287 
1288 static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
1289 {
1290 	struct drm_info_node *node = (struct drm_info_node *) m->private;
1291 	struct drm_device *dev = node->minor->dev;
1292 	struct amdgpu_device *adev = drm_to_adev(dev);
1293 
1294 	seq_write(m, adev->bios, adev->bios_size);
1295 	return 0;
1296 }
1297 
1298 static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
1299 {
1300 	struct drm_info_node *node = (struct drm_info_node *)m->private;
1301 	struct drm_device *dev = node->minor->dev;
1302 	struct amdgpu_device *adev = drm_to_adev(dev);
1303 	int r;
1304 
1305 	r = pm_runtime_get_sync(dev->dev);
1306 	if (r < 0) {
1307 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1308 		return r;
1309 	}
1310 
1311 	seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));
1312 
1313 	pm_runtime_mark_last_busy(dev->dev);
1314 	pm_runtime_put_autosuspend(dev->dev);
1315 
1316 	return 0;
1317 }
1318 
1319 static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
1320 {
1321 	struct drm_info_node *node = (struct drm_info_node *)m->private;
1322 	struct drm_device *dev = node->minor->dev;
1323 	struct amdgpu_device *adev = drm_to_adev(dev);
1324 	struct ttm_resource_manager *man;
1325 	int r;
1326 
1327 	r = pm_runtime_get_sync(dev->dev);
1328 	if (r < 0) {
1329 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1330 		return r;
1331 	}
1332 
1333 	man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
1334 	r = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
1335 	seq_printf(m, "(%d)\n", r);
1336 
1337 	pm_runtime_mark_last_busy(dev->dev);
1338 	pm_runtime_put_autosuspend(dev->dev);
1339 
1340 	return 0;
1341 }
1342 
1343 static int amdgpu_debugfs_vm_info(struct seq_file *m, void *data)
1344 {
1345 	struct drm_info_node *node = (struct drm_info_node *)m->private;
1346 	struct drm_device *dev = node->minor->dev;
1347 	struct drm_file *file;
1348 	int r;
1349 
1350 	r = mutex_lock_interruptible(&dev->filelist_mutex);
1351 	if (r)
1352 		return r;
1353 
1354 	list_for_each_entry(file, &dev->filelist, lhead) {
1355 		struct amdgpu_fpriv *fpriv = file->driver_priv;
1356 		struct amdgpu_vm *vm = &fpriv->vm;
1357 
1358 		seq_printf(m, "pid:%d\tProcess:%s ----------\n",
1359 				vm->task_info.pid, vm->task_info.process_name);
1360 		r = amdgpu_bo_reserve(vm->root.base.bo, true);
1361 		if (r)
1362 			break;
1363 		amdgpu_debugfs_vm_bo_info(vm, m);
1364 		amdgpu_bo_unreserve(vm->root.base.bo);
1365 	}
1366 
1367 	mutex_unlock(&dev->filelist_mutex);
1368 
1369 	return r;
1370 }
1371 
/* seq_file based debugfs entries registered via amdgpu_debugfs_add_files().
 * (The mixed "func" / "&func" spelling is cosmetic — both forms yield the
 * same function pointer.) */
static const struct drm_info_list amdgpu_debugfs_list[] = {
	{"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump},
	{"amdgpu_test_ib", &amdgpu_debugfs_test_ib},
	{"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram},
	{"amdgpu_evict_gtt", &amdgpu_debugfs_evict_gtt},
	{"amdgpu_vm_info", &amdgpu_debugfs_vm_info},
};
1379 
/*
 * amdgpu_ib_preempt_fences_swap - steal the not-yet-signaled fences
 *
 * Walks the fence ring from the last completed seqno (exclusive) up to the
 * last emitted seqno (inclusive), moving each installed fence pointer out of
 * the driver's fence slots into @fences, indexed by masked seqno.  The slots
 * are NULLed so the fence driver forgets the old fences; the caller signals
 * them later via amdgpu_ib_preempt_signal_fences().
 *
 * Must only be called when last_seq != sync_seq (the do-while would
 * otherwise walk the whole ring), and with the scheduler parked so nobody
 * else touches the slots — rcu_dereference_protected(.., 1) asserts that
 * exclusivity unconditionally.
 */
static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring,
					  struct dma_fence **fences)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t sync_seq, last_seq;

	last_seq = atomic_read(&ring->fence_drv.last_seq);
	sync_seq = ring->fence_drv.sync_seq;

	/* reduce both seqnos to slot indices in the fence ring */
	last_seq &= drv->num_fences_mask;
	sync_seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* take the fence out of the slot so force_completion and
		 * resubmission won't see it */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		fences[last_seq] = fence;

	} while (last_seq != sync_seq);
}
1409 
/* Signal and release every fence collected by the swap step; NULL slots
 * in @fences are simply skipped. */
static void amdgpu_ib_preempt_signal_fences(struct dma_fence **fences,
					    int length)
{
	int i;

	for (i = 0; i < length; i++) {
		struct dma_fence *fence = fences[i];

		if (fence) {
			dma_fence_signal(fence);
			dma_fence_put(fence);
		}
	}
}
1424 
/*
 * amdgpu_ib_preempt_job_recovery - resubmit the preempted jobs
 *
 * Re-runs every job still on the scheduler's pending list by invoking its
 * run_job hook directly, under job_list_lock.  run_job returns a new
 * hardware-fence reference; only the resubmission side effect is needed
 * here, so that reference is dropped immediately.
 */
static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct dma_fence *fence;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry(s_job, &sched->pending_list, list) {
		fence = sched->ops->run_job(s_job);
		dma_fence_put(fence);
	}
	spin_unlock(&sched->job_list_lock);
}
1437 
/*
 * amdgpu_ib_preempt_mark_partial_job - flag the job that was cut short
 *
 * GFX rings only.  Reads the seqno the hardware recorded at preemption time
 * (slot 2 of the fence CPU address — assumed layout, confirm against the
 * GFX fence-emit code) and compares it with the last seqno the driver has
 * processed.  If a job really was interrupted mid-IB, the job owning that
 * fence is marked AMDGPU_IB_PREEMPTED so resubmission can treat it as a
 * partial job.  Jobs whose finished fence is already signaled are freed
 * from the pending list along the way.
 */
static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
{
	struct amdgpu_job *job;
	struct drm_sched_job *s_job, *tmp;
	uint32_t preempt_seq;
	struct dma_fence *fence, **ptr;
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct drm_gpu_scheduler *sched = &ring->sched;
	bool preempted = true;

	if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
		return;

	preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
	if (preempt_seq <= atomic_read(&drv->last_seq)) {
		/* nothing was actually interrupted; `fence` stays
		 * uninitialized but is never read below because `preempted`
		 * short-circuits the comparison */
		preempted = false;
		goto no_preempt;
	}

	preempt_seq &= drv->num_fences_mask;
	ptr = &drv->fences[preempt_seq];
	fence = rcu_dereference_protected(*ptr, 1);

no_preempt:
	spin_lock(&sched->job_list_lock);
	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
			/* remove job from ring_mirror_list */
			list_del_init(&s_job->list);
			sched->ops->free_job(s_job);
			continue;
		}
		job = to_amdgpu_job(s_job);
		if (preempted && job->fence == fence)
			/* mark the job as preempted */
			job->preemption_status |= AMDGPU_IB_PREEMPTED;
	}
	spin_unlock(&sched->job_list_lock);
}
1477 
/*
 * amdgpu_debugfs_ib_preempt - trigger mid-IB preemption on one ring
 *
 * @data: amdgpu_device, installed as the debugfs file's private data
 * @val:  ring index to preempt
 *
 * Parks the ring's scheduler thread, asks the hardware to preempt the
 * current IB, then (if a job was actually interrupted) marks the partial
 * job, steals and later signals the orphaned fences, and resubmits the
 * unfinished jobs.  Ordering is delicate: the scheduler must stay parked
 * from preemption until the old fences have been swapped out.
 */
static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
{
	int r, resched, length;
	struct amdgpu_ring *ring;
	struct dma_fence **fences = NULL;
	struct amdgpu_device *adev = (struct amdgpu_device *)data;

	if (val >= AMDGPU_MAX_RINGS)
		return -EINVAL;

	ring = adev->rings[val];

	/* the ring must exist, support preemption and have a live scheduler */
	if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread)
		return -EINVAL;

	/* the last preemption failed */
	if (ring->trail_seq != le32_to_cpu(*ring->trail_fence_cpu_addr))
		return -EBUSY;

	/* one slot per possible in-flight fence */
	length = ring->fence_drv.num_fences_mask + 1;
	fences = kcalloc(length, sizeof(void *), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Avoid accidently unparking the sched thread during GPU reset */
	r = down_read_killable(&adev->reset_sem);
	if (r)
		goto pro_end;

	/* stop the scheduler */
	kthread_park(ring->sched.thread);

	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* preempt the IB */
	r = amdgpu_ring_preempt_ib(ring);
	if (r) {
		DRM_WARN("failed to preempt ring %d\n", ring->idx);
		goto failure;
	}

	amdgpu_fence_process(ring);

	/* a gap between completed and emitted seqnos means jobs were
	 * kicked off the hardware mid-flight */
	if (atomic_read(&ring->fence_drv.last_seq) !=
	    ring->fence_drv.sync_seq) {
		DRM_INFO("ring %d was preempted\n", ring->idx);

		amdgpu_ib_preempt_mark_partial_job(ring);

		/* swap out the old fences */
		amdgpu_ib_preempt_fences_swap(ring, fences);

		amdgpu_fence_driver_force_completion(ring);

		/* resubmit unfinished jobs */
		amdgpu_ib_preempt_job_recovery(&ring->sched);

		/* wait for jobs finished */
		amdgpu_fence_wait_empty(ring);

		/* signal the old fences */
		amdgpu_ib_preempt_signal_fences(fences, length);
	}

failure:
	/* restart the scheduler */
	kthread_unpark(ring->sched.thread);

	up_read(&adev->reset_sem);

	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);

pro_end:
	kfree(fences);

	return r;
}
1555 
1556 static int amdgpu_debugfs_sclk_set(void *data, u64 val)
1557 {
1558 	int ret = 0;
1559 	uint32_t max_freq, min_freq;
1560 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
1561 
1562 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1563 		return -EINVAL;
1564 
1565 	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1566 	if (ret < 0) {
1567 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1568 		return ret;
1569 	}
1570 
1571 	if (is_support_sw_smu(adev)) {
1572 		ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq);
1573 		if (ret || val > max_freq || val < min_freq)
1574 			return -EINVAL;
1575 		ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val);
1576 	} else {
1577 		return 0;
1578 	}
1579 
1580 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1581 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1582 
1583 	if (ret)
1584 		return -EINVAL;
1585 
1586 	return 0;
1587 }
1588 
/* write-only attribute: writing a ring index to amdgpu_preempt_ib triggers
 * mid-IB preemption on that ring */
DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL,
			amdgpu_debugfs_ib_preempt, "%llu\n");

/* write-only attribute: writing a frequency to amdgpu_force_sclk pins SCLK
 * (units per smu_set_soft_freq_range — presumably MHz, confirm) */
DEFINE_SIMPLE_ATTRIBUTE(fops_sclk_set, NULL,
			amdgpu_debugfs_sclk_set, "%llu\n");
1594 
/**
 * amdgpu_debugfs_init - create the driver-wide debugfs entries
 *
 * @adev: device to register debugfs files for
 *
 * Registers the preemption and sclk control files, then fans out to the
 * per-subsystem debugfs init helpers (ttm, pm, sa, fence, gem, regs,
 * firmware, DC, rings, RAS, autodump, RAP, secure display, FW attestation).
 * Most sub-init failures are only logged, not propagated.
 */
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	int r, i;

	adev->debugfs_preempt =
		debugfs_create_file("amdgpu_preempt_ib", 0600,
				    adev_to_drm(adev)->primary->debugfs_root, adev,
				    &fops_ib_preempt);
	/* NOTE(review): debugfs_create_file reports failure with an ERR_PTR,
	 * not NULL, so this check likely never fires — verify against the
	 * kernel version this targets */
	if (!(adev->debugfs_preempt)) {
		DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
		return -EIO;
	}

	adev->smu.debugfs_sclk =
		debugfs_create_file("amdgpu_force_sclk", 0200,
				    adev_to_drm(adev)->primary->debugfs_root, adev,
				    &fops_sclk_set);
	/* NOTE(review): same ERR_PTR-vs-NULL concern as above */
	if (!(adev->smu.debugfs_sclk)) {
		DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
		return -EIO;
	}

	/* Register debugfs entries for amdgpu_ttm */
	r = amdgpu_ttm_debugfs_init(adev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}

	r = amdgpu_debugfs_pm_init(adev);
	if (r) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
		return r;
	}

	/* from here on, failures are logged but do not abort init */
	if (amdgpu_debugfs_sa_init(adev)) {
		dev_err(adev->dev, "failed to register debugfs file for SA\n");
	}

	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	r = amdgpu_debugfs_gem_init(adev);
	if (r)
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_regs_init(adev);
	if (r)
		DRM_ERROR("registering register debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_firmware_init(adev);
	if (r)
		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);

#if defined(CONFIG_DRM_AMD_DC)
	if (amdgpu_device_has_dc_support(adev)) {
		if (dtn_debugfs_init(adev))
			DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
	}
#endif

	/* one entry per live ring */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring)
			continue;

		if (amdgpu_debugfs_ring_init(adev, ring)) {
			DRM_ERROR("Failed to register debugfs file for rings !\n");
		}
	}

	amdgpu_ras_debugfs_create_all(adev);

	amdgpu_debugfs_autodump_init(adev);

	amdgpu_rap_debugfs_init(adev);

	amdgpu_securedisplay_debugfs_init(adev);

	amdgpu_fw_attestation_debugfs_init(adev);

	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
					ARRAY_SIZE(amdgpu_debugfs_list));
}
1680 
1681 #else
/* Stubs used when debugfs support (CONFIG_DEBUG_FS) is compiled out. */
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	return 0;
}
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
1690 #endif
1691