/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/poll.h>
#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_dm_debugfs.h"
#include "amdgpu_ras.h"
#include "amdgpu_rap.h"

/**
 * amdgpu_debugfs_add_files - Add simple debugfs entries
 *
 * @adev:  Device to attach debugfs entries to
 * @files:  Array of function callbacks that respond to reads
 * @nfiles: Number of callbacks to register
 *
 * Returns 0 on success, or a negative error code if the maximum number
 * of debugfs components has been reached.
 */
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     const struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < adev->debugfs_count; i++) {
		if (adev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = adev->debugfs_count + 1;
	if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	adev->debugfs[adev->debugfs_count].files = files;
	adev->debugfs[adev->debugfs_count].num_files = nfiles;
	adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 adev_to_drm(adev)->primary->debugfs_root,
				 adev_to_drm(adev)->primary);
#endif
	return 0;
}
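
/*
 * Illustrative registration through amdgpu_debugfs_add_files() (a sketch;
 * the component name and callback are hypothetical, not part of this file):
 *
 *	static const struct drm_info_list my_component_list[] = {
 *		{"amdgpu_my_component", my_component_show},
 *	};
 *
 *	amdgpu_debugfs_add_files(adev, my_component_list,
 *				 ARRAY_SIZE(my_component_list));
 */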

int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned long timeout = 600 * HZ;
	int ret;

	wake_up_interruptible(&adev->autodump.gpu_hang);

	ret = wait_for_completion_interruptible_timeout(&adev->autodump.dumping, timeout);
	if (ret == 0) {
		pr_err("autodump: timeout, move on to gpu recovery\n");
		return -ETIMEDOUT;
	}
#endif
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_autodump_open(struct inode *inode, struct file *file)
{
	struct amdgpu_device *adev = inode->i_private;
	int ret;

	file->private_data = adev;

	ret = down_read_killable(&adev->reset_sem);
	if (ret)
		return ret;

	if (adev->autodump.dumping.done) {
		reinit_completion(&adev->autodump.dumping);
		ret = 0;
	} else {
		ret = -EBUSY;
	}

	up_read(&adev->reset_sem);

	return ret;
}

static int amdgpu_debugfs_autodump_release(struct inode *inode, struct file *file)
{
	struct amdgpu_device *adev = file->private_data;

	complete_all(&adev->autodump.dumping);
	return 0;
}

static unsigned int amdgpu_debugfs_autodump_poll(struct file *file, struct poll_table_struct *poll_table)
{
	struct amdgpu_device *adev = file->private_data;

	poll_wait(file, &adev->autodump.gpu_hang, poll_table);

	if (amdgpu_in_reset(adev))
		return POLLIN | POLLRDNORM | POLLWRNORM;

	return 0;
}

static const struct file_operations autodump_debug_fops = {
	.owner = THIS_MODULE,
	.open = amdgpu_debugfs_autodump_open,
	.poll = amdgpu_debugfs_autodump_poll,
	.release = amdgpu_debugfs_autodump_release,
};

static void amdgpu_debugfs_autodump_init(struct amdgpu_device *adev)
{
	init_completion(&adev->autodump.dumping);
	complete_all(&adev->autodump.dumping);
	init_waitqueue_head(&adev->autodump.gpu_hang);

	debugfs_create_file("amdgpu_autodump", 0600,
		adev_to_drm(adev)->primary->debugfs_root,
		adev, &autodump_debug_fops);
}
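
/*
 * Illustrative userspace flow for amdgpu_autodump (a sketch, assuming the
 * usual debugfs mount point and DRM minor 0): open the file, poll() until
 * a GPU hang is signalled, dump whatever state is of interest, then close
 * the file so amdgpu_debugfs_wait_dump() can proceed with GPU recovery.
 *
 *	int fd = open("/sys/kernel/debug/dri/0/amdgpu_autodump", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0) {
 *		// ... collect GPU state from other debugfs files ...
 *		close(fd);	// completes adev->autodump.dumping
 *	}
 */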

/**
 * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
 *
 * @read: True if reading
 * @f: open file handle
 * @buf: User buffer to write/read to
 * @size: Number of bytes to write/read
 * @pos:  Offset to seek to
 *
 * This debugfs entry has special meaning on the offset being sought.
 * Various bits have different meanings:
 *
 * Bit 62:  Indicates a GRBM bank switch is needed
 * Bit 61:  Indicates an SRBM bank switch is needed (implies bit 62 is
 *	    zero)
 * Bits 24..33: The SE or ME selector if needed
 * Bits 34..43: The SH (or SA) or PIPE selector if needed
 * Bits 44..53: The INSTANCE (or CU/WGP) or QUEUE selector if needed
 * Bits 54..58: The VMID selector, used with the SRBM path (bit 61)
 *
 * Bit 23:  Indicates that the PM power gating lock should be held
 *	    This is necessary to read registers that might be
 *	    unreliable during a power gating transition.
 *
 * The lower bits are the BYTE offset of the register to read.  This
 * allows reading multiple registers in a single call and having
 * the returned size reflect that.
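 *
 * Example (an illustrative sketch, not part of the kernel API docs): to
 * read the register at byte offset 0x100 through GRBM bank switching on
 * SE 1, with SH and INSTANCE left broadcast (selector 0x3FF):
 *
 *	uint32_t value;
 *	uint64_t pos = (1ULL << 62) |		// GRBM bank switch
 *		       (1ULL << 24) |		// SE selector = 1
 *		       (0x3FFULL << 34) |	// SH = broadcast
 *		       (0x3FFULL << 44) |	// INSTANCE = broadcast
 *		       0x100;			// register byte offset
 *	pread(fd, &value, 4, pos);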
 */
static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
		char __user *buf, size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank, use_ring;
	unsigned instance_bank, sh_bank, se_bank, me, pipe, queue, vmid;

	pm_pg_lock = use_bank = use_ring = false;
	instance_bank = sh_bank = se_bank = me = pipe = queue = vmid = 0;

	if (size & 0x3 || *pos & 0x3 ||
			((*pos & (1ULL << 62)) && (*pos & (1ULL << 61))))
		return -EINVAL;

	/* are we reading registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
		sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
		instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = true;
	} else if (*pos & (1ULL << 61)) {
		me = (*pos & GENMASK_ULL(33, 24)) >> 24;
		pipe = (*pos & GENMASK_ULL(43, 34)) >> 34;
		queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
		vmid = (*pos & GENMASK_ULL(58, 54)) >> 54;

		use_ring = true;
	} else {
		use_bank = use_ring = false;
	}

	*pos &= (1UL << 22) - 1;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return -EINVAL;
		}
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	} else if (use_ring) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (read) {
			value = RREG32(*pos >> 2);
			r = put_user(value, (uint32_t *)buf);
		} else {
			r = get_user(value, (uint32_t *)buf);
			if (!r)
				amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value);
		}
		if (r) {
			result = r;
			goto end;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	} else if (use_ring) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}

/**
 * amdgpu_debugfs_regs_read - Callback for reading MMIO registers
 */
static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
}

/**
 * amdgpu_debugfs_regs_write - Callback for writing MMIO registers
 */
static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
}

/**
 * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos:  Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to read.  This
 * allows reading multiple registers in a single call and having
 * the returned size reflect that.
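 *
 * Example (an illustrative sketch): read the PCIE register at byte
 * offset 0x10 into a 32-bit value:
 *
 *	uint32_t value;
 *	pread(fd, &value, 4, 0x10);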
 */
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		value = RREG32_PCIE(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}

/**
 * amdgpu_debugfs_regs_pcie_write - Write to a PCIE register
 *
 * @f: open file handle
 * @buf: User buffer to write data from
 * @size: Number of bytes to write
 * @pos:  Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to write.  This
 * allows writing multiple registers in a single call and having
 * the returned size reflect that.
 */
static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		WREG32_PCIE(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}

/**
 * amdgpu_debugfs_regs_didt_read - Read from a DIDT register
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos:  Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to read.  This
 * allows reading multiple registers in a single call and having
 * the returned size reflect that.
 */
static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		value = RREG32_DIDT(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}

/**
 * amdgpu_debugfs_regs_didt_write - Write to a DIDT register
 *
 * @f: open file handle
 * @buf: User buffer to write data from
 * @size: Number of bytes to write
 * @pos:  Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to write.  This
 * allows writing multiple registers in a single call and having
 * the returned size reflect that.
 */
static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		WREG32_DIDT(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}

/**
 * amdgpu_debugfs_regs_smc_read - Read from an SMC register
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos:  Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to read.  This
 * allows reading multiple registers in a single call and having
 * the returned size reflect that.
 */
static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		value = RREG32_SMC(*pos);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}

/**
 * amdgpu_debugfs_regs_smc_write - Write to an SMC register
 *
 * @f: open file handle
 * @buf: User buffer to write data from
 * @size: Number of bytes to write
 * @pos:  Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to write.  This
 * allows writing multiple registers in a single call and having
 * the returned size reflect that.
 */
static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		WREG32_SMC(*pos, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}

/**
 * amdgpu_debugfs_gca_config_read - Read from gfx config data
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos:  Offset to seek to
 *
 * This file is used to access configuration data in a somewhat
 * stable fashion.  The format is a series of DWORDs with the first
 * indicating which revision it is.  New content is appended to the
 * end so that older software can still read the data.
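 *
 * Example (an illustrative sketch): read the format revision first and
 * then only as many DWORDs as that revision is known to carry:
 *
 *	uint32_t version;
 *	pread(fd, &version, 4, 0);	// config[0] is the revision (3 here)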
 */
static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 3;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = adev->pg_flags;
	config[no_regs++] = adev->cg_flags;

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	/* rev==3 */
	config[no_regs++] = adev->pdev->device;
	config[no_regs++] = adev->pdev->revision;
	config[no_regs++] = adev->pdev->subsystem_device;
	config[no_regs++] = adev->pdev->subsystem_vendor;

	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}

/**
 * amdgpu_debugfs_sensor_read - Read from the powerplay sensors
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos:  Offset to seek to
 *
 * The offset is treated as the BYTE address of one of the sensors
 * enumerated in amd/include/kgd_pp_interface.h under the
 * 'amd_pp_sensors' enumeration.  For instance to read the UVD VCLK
 * you would use the offset 3 * 4 = 12.
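 *
 * Example (an illustrative sketch): read sensor number 3 (UVD VCLK, per
 * the note above) as a single 32-bit value:
 *
 *	uint32_t vclk;
 *	pread(fd, &vclk, 4, 3 * 4);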
 */
static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int idx, x, outsize, r, valuesize;
	uint32_t values[16];

	if (size & 3 || *pos & 0x3)
		return -EINVAL;

	if (!adev->pm.dpm_enabled)
		return -EINVAL;

	/* convert offset to sensor number */
	idx = *pos >> 2;

	valuesize = sizeof(values);

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r) {
		amdgpu_virt_disable_access_debugfs(adev);
		return r;
	}

	if (size > valuesize) {
		amdgpu_virt_disable_access_debugfs(adev);
		return -EINVAL;
	}

	outsize = 0;
	x = 0;
	if (!r) {
		while (size) {
			r = put_user(values[x++], (int32_t *)buf);
			buf += 4;
			size -= 4;
			outsize += 4;
		}
	}

	amdgpu_virt_disable_access_debugfs(adev);
	return !r ? outsize : r;
}

/**
 * amdgpu_debugfs_wave_read - Read WAVE STATUS data
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos:  Offset to seek to
 *
 * The offset being sought selects the wave for which the status data
 * will be returned.  The bits are used as follows:
 *
 * Bits 0..6:	Byte offset into data
 * Bits 7..14:	SE selector
 * Bits 15..22:	SH/SA selector
 * Bits 23..30: CU/{WGP+SIMD} selector
 * Bits 31..36: WAVE ID selector
 * Bits 37..44: SIMD ID selector
 *
 * The returned data begins with one DWORD of version information,
 * followed by WAVE STATUS registers relevant to the GFX IP version
 * being used.  See gfx_v8_0_read_wave_data() for an example output.
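 *
 * Example (an illustrative sketch): read the status words of wave 0,
 * SIMD 0 on SE 0 / SH 0 / CU 1:
 *
 *	uint32_t data[32];
 *	uint64_t pos = 1ULL << 23;	// CU selector = 1, all others 0
 *	pread(fd, data, sizeof(data), pos);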
 */
static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int r, x;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & GENMASK_ULL(6, 0));
	se = (*pos & GENMASK_ULL(14, 7)) >> 7;
	sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
	cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
	wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
	simd = (*pos & GENMASK_ULL(44, 37)) >> 37;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (!x) {
		amdgpu_virt_disable_access_debugfs(adev);
		return -EINVAL;
	}

	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}

/**
 * amdgpu_debugfs_gpr_read - Read wave gprs
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos:  Offset to seek to
 *
 * The offset being sought selects the wave for which the GPR data
 * will be returned.  The bits are used as follows:
 *
 * Bits 0..11:	Byte offset into data
 * Bits 12..19:	SE selector
 * Bits 20..27:	SH/SA selector
 * Bits 28..35: CU/{WGP+SIMD} selector
 * Bits 36..43: WAVE ID selector
 * Bits 44..51: SIMD ID selector
 * Bits 52..59: Thread selector
 * Bits 60..61: Bank selector (VGPR=0,SGPR=1)
 *
 * The return data comes from the SGPR or VGPR register bank for
 * the selected operational unit.
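 *
 * Example (an illustrative sketch): read the first 16 SGPRs of wave 0,
 * SIMD 0 on SE 0 / SH 0 / CU 0:
 *
 *	uint32_t sgprs[16];
 *	uint64_t pos = 1ULL << 60;	// bank = 1 selects the SGPR file
 *	pread(fd, sgprs, sizeof(sgprs), pos);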
 */
static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int r;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;

	if (size > 4096 || size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
	se = (*pos & GENMASK_ULL(19, 12)) >> 12;
	sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
	cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
	wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
	simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
	thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
	bank = (*pos & GENMASK_ULL(61, 60)) >> 60;

	data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0)
		goto err;

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0)
		goto err;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	if (bank == 0) {
		if (adev->gfx.funcs->read_wave_vgprs)
			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size >> 2, data);
	} else {
		if (adev->gfx.funcs->read_wave_sgprs)
			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size >> 2, data);
	}

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	while (size) {
		uint32_t value;

		value = data[result >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			amdgpu_virt_disable_access_debugfs(adev);
			goto err;
		}

		result += 4;
		buf += 4;
		size -= 4;
	}

	kfree(data);
	amdgpu_virt_disable_access_debugfs(adev);
	return result;

err:
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
	kfree(data);
	return r;
}

/**
 * amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF
 *
 * @f: open file handle
 * @buf: User buffer to write data from
 * @size: Number of bytes to write
 * @pos:  Offset to seek to
 *
 * Write a 32-bit zero to disable or a 32-bit non-zero to enable
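 *
 * Example (an illustrative sketch): disable GFXOFF by writing a zero:
 *
 *	uint32_t v = 0;
 *	pwrite(fd, &v, 4, 0);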
 */
static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			return r;
		}

		amdgpu_gfx_off_ctrl(adev, value ? true : false);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return result;
}

/**
 * amdgpu_debugfs_gfxoff_read - Read GFXOFF status
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos:  Offset to seek to
 */
static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		/* drop the reference taken above even on failure */
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = amdgpu_get_gfx_off_status(adev, &value);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			return r;
		}

		r = put_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return result;
}

static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gfxoff_read,
	.write = amdgpu_debugfs_gfxoff_write,
	.llseek = default_llseek
};

static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
	&amdgpu_debugfs_gfxoff_fops,
};

static const char *debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
	"amdgpu_gfxoff",
};

/**
 * amdgpu_debugfs_regs_init - Initialize debugfs entries that provide
 * register access.
 *
 * @adev: The device to attach the debugfs entries to
 */
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		ent = debugfs_create_file(debugfs_regs_names[i],
					  S_IFREG | S_IRUGO, root,
					  adev, debugfs_regs[i]);
		if (!i && !IS_ERR_OR_NULL(ent))
			i_size_write(ent->d_inode, adev->rmmio_size);
		adev->debugfs_regs[i] = ent;
	}

	return 0;
}

static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r = 0, i;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* Avoid accidentally unparking the sched thread during GPU reset */
	r = down_read_killable(&adev->reset_sem);
	if (r) {
		/* drop the runtime PM reference taken above */
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}

	/* hold on the scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_park(ring->sched.thread);
	}

	seq_printf(m, "run ib test:\n");
	r = amdgpu_ib_ring_tests(adev);
	if (r)
		seq_printf(m, "ib ring tests failed (%d).\n", r);
	else
		seq_printf(m, "ib ring tests passed.\n");

	/* go on the scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_unpark(ring->sched.thread);
	}

	up_read(&adev->reset_sem);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);

	seq_write(m, adev->bios, adev->bios_size);
	return 0;
}

static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT));

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_list[] = {
	{"amdgpu_vbios", &amdgpu_debugfs_get_vbios_dump},
	{"amdgpu_test_ib", &amdgpu_debugfs_test_ib},
	{"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram},
	{"amdgpu_evict_gtt", &amdgpu_debugfs_evict_gtt},
};

static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring,
					  struct dma_fence **fences)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t sync_seq, last_seq;

	last_seq = atomic_read(&ring->fence_drv.last_seq);
	sync_seq = ring->fence_drv.sync_seq;

	last_seq &= drv->num_fences_mask;
	sync_seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		fences[last_seq] = fence;

	} while (last_seq != sync_seq);
}

static void amdgpu_ib_preempt_signal_fences(struct dma_fence **fences,
					    int length)
{
	int i;
	struct dma_fence *fence;

	for (i = 0; i < length; i++) {
		fence = fences[i];
		if (!fence)
			continue;
		dma_fence_signal(fence);
		dma_fence_put(fence);
	}
}

static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct dma_fence *fence;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
		fence = sched->ops->run_job(s_job);
		dma_fence_put(fence);
	}
	spin_unlock(&sched->job_list_lock);
}

static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
{
	struct amdgpu_job *job;
	struct drm_sched_job *s_job, *tmp;
	uint32_t preempt_seq;
	struct dma_fence *fence, **ptr;
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct drm_gpu_scheduler *sched = &ring->sched;
	bool preempted = true;

	if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
		return;

	preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
	if (preempt_seq <= atomic_read(&drv->last_seq)) {
		preempted = false;
		goto no_preempt;
	}

	preempt_seq &= drv->num_fences_mask;
	ptr = &drv->fences[preempt_seq];
	fence = rcu_dereference_protected(*ptr, 1);

no_preempt:
	spin_lock(&sched->job_list_lock);
	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
			/* remove job from ring_mirror_list */
			list_del_init(&s_job->node);
			sched->ops->free_job(s_job);
			continue;
		}
		job = to_amdgpu_job(s_job);
		if (preempted && job->fence == fence)
			/* mark the job as preempted */
			job->preemption_status |= AMDGPU_IB_PREEMPTED;
	}
	spin_unlock(&sched->job_list_lock);
}

static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
{
	int r, resched, length;
	struct amdgpu_ring *ring;
	struct dma_fence **fences = NULL;
	struct amdgpu_device *adev = (struct amdgpu_device *)data;

	if (val >= AMDGPU_MAX_RINGS)
		return -EINVAL;

	ring = adev->rings[val];

	if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread)
		return -EINVAL;

	/* the last preemption failed */
	if (ring->trail_seq != le32_to_cpu(*ring->trail_fence_cpu_addr))
		return -EBUSY;

	length = ring->fence_drv.num_fences_mask + 1;
	fences = kcalloc(length, sizeof(void *), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Avoid accidentally unparking the sched thread during GPU reset */
	r = down_read_killable(&adev->reset_sem);
	if (r)
		goto pro_end;

	/* stop the scheduler */
	kthread_park(ring->sched.thread);

	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* preempt the IB */
	r = amdgpu_ring_preempt_ib(ring);
	if (r) {
		DRM_WARN("failed to preempt ring %d\n", ring->idx);
		goto failure;
	}

	amdgpu_fence_process(ring);

	if (atomic_read(&ring->fence_drv.last_seq) !=
	    ring->fence_drv.sync_seq) {
		DRM_INFO("ring %d was preempted\n", ring->idx);

		amdgpu_ib_preempt_mark_partial_job(ring);

		/* swap out the old fences */
		amdgpu_ib_preempt_fences_swap(ring, fences);

		amdgpu_fence_driver_force_completion(ring);

		/* resubmit unfinished jobs */
		amdgpu_ib_preempt_job_recovery(&ring->sched);

		/* wait for jobs finished */
		amdgpu_fence_wait_empty(ring);

		/* signal the old fences */
		amdgpu_ib_preempt_signal_fences(fences, length);
	}

failure:
	/* restart the scheduler */
	kthread_unpark(ring->sched.thread);

	up_read(&adev->reset_sem);

	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);

pro_end:
	kfree(fences);

	return r;
}

static int amdgpu_debugfs_sclk_set(void *data, u64 val)
{
	int ret = 0;
	uint32_t max_freq, min_freq;
	struct amdgpu_device *adev = (struct amdgpu_device *)data;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq);
		if (!ret && (val > max_freq || val < min_freq))
			ret = -EINVAL;
		if (!ret)
			ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val);
	}

	/* drop the runtime PM reference on all paths, including errors */
	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (ret)
		return -EINVAL;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL,
			amdgpu_debugfs_ib_preempt, "%llu\n");

DEFINE_SIMPLE_ATTRIBUTE(fops_sclk_set, NULL,
			amdgpu_debugfs_sclk_set, "%llu\n");
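
/*
 * Illustrative shell usage (a sketch; the DRM minor number in the debugfs
 * path and the accepted frequency range depend on the system):
 *
 *	# preempt whatever is executing on ring 0
 *	echo 0 > /sys/kernel/debug/dri/0/amdgpu_preempt_ib
 *
 *	# force a fixed SCLK on SMU-based parts
 *	echo 1000 > /sys/kernel/debug/dri/0/amdgpu_force_sclk
 */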

int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	int r, i;

	adev->debugfs_preempt =
		debugfs_create_file("amdgpu_preempt_ib", 0600,
				    adev_to_drm(adev)->primary->debugfs_root, adev,
				    &fops_ib_preempt);
	if (!(adev->debugfs_preempt)) {
		DRM_ERROR("unable to create amdgpu_preempt_ib debugfs file\n");
		return -EIO;
	}

	adev->smu.debugfs_sclk =
		debugfs_create_file("amdgpu_force_sclk", 0200,
				    adev_to_drm(adev)->primary->debugfs_root, adev,
				    &fops_sclk_set);
	if (!(adev->smu.debugfs_sclk)) {
		DRM_ERROR("unable to create amdgpu_force_sclk debugfs file\n");
		return -EIO;
	}

	/* Register debugfs entries for amdgpu_ttm */
	r = amdgpu_ttm_debugfs_init(adev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}

	r = amdgpu_debugfs_pm_init(adev);
	if (r) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
		return r;
	}

	if (amdgpu_debugfs_sa_init(adev)) {
		dev_err(adev->dev, "failed to register debugfs file for SA\n");
	}

	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	r = amdgpu_debugfs_gem_init(adev);
	if (r)
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_regs_init(adev);
	if (r)
		DRM_ERROR("registering register debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_firmware_init(adev);
	if (r)
		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);

#if defined(CONFIG_DRM_AMD_DC)
	if (amdgpu_device_has_dc_support(adev)) {
		if (dtn_debugfs_init(adev))
			DRM_ERROR("amdgpu: failed to initialize dtn debugfs support.\n");
	}
#endif

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring)
			continue;

		if (amdgpu_debugfs_ring_init(adev, ring)) {
			DRM_ERROR("Failed to register debugfs file for rings !\n");
		}
	}

	amdgpu_ras_debugfs_create_all(adev);

	amdgpu_debugfs_autodump_init(adev);

	amdgpu_rap_debugfs_init(adev);

	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
					ARRAY_SIZE(amdgpu_debugfs_list));
}

#else
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	return 0;
}
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
#endif