/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
#include "amdgpu_ras.h"

/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE         msecs_to_jiffies(100)

/*
 * GPU GFX IP block helper functions.
 */

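/**
 * amdgpu_gfx_mec_queue_to_bit - Map a MEC/pipe/queue triple to a flat bit index
 *
 * @adev: amdgpu_device pointer
 * @mec: MEC (micro engine compute) index
 * @pipe: pipe index within the MEC
 * @queue: queue index within the pipe
 *
 * Returns the position of the given compute queue in the
 * adev->gfx.mec.queue_bitmap.
 */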
int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
				int pipe, int queue)
{
	int bit = 0;

	bit += mec * adev->gfx.mec.num_pipe_per_mec
		* adev->gfx.mec.num_queue_per_pipe;
	bit += pipe * adev->gfx.mec.num_queue_per_pipe;
	bit += queue;

	return bit;
}

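/**
 * amdgpu_gfx_bit_to_mec_queue - Convert a flat bit index back to MEC/pipe/queue
 *
 * @adev: amdgpu_device pointer
 * @bit: position in the adev->gfx.mec.queue_bitmap
 * @mec: returned MEC index
 * @pipe: returned pipe index
 * @queue: returned queue index
 *
 * The inverse of amdgpu_gfx_mec_queue_to_bit().
 */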
void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
				 int *mec, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.mec.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
		% adev->gfx.mec.num_pipe_per_mec;
	*mec = (bit / adev->gfx.mec.num_queue_per_pipe)
	       / adev->gfx.mec.num_pipe_per_mec;
}

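/**
 * amdgpu_gfx_is_mec_queue_enabled - Check whether a compute queue is owned by amdgpu
 *
 * @adev: amdgpu_device pointer
 * @mec: MEC index
 * @pipe: pipe index within the MEC
 * @queue: queue index within the pipe
 *
 * Returns true if the queue is set in adev->gfx.mec.queue_bitmap.
 */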
bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
				     int mec, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
			adev->gfx.mec.queue_bitmap);
}

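/**
 * amdgpu_gfx_me_queue_to_bit - Map a ME/pipe/queue triple to a flat bit index
 *
 * @adev: amdgpu_device pointer
 * @me: ME (micro engine) index
 * @pipe: pipe index within the ME
 * @queue: queue index within the pipe
 *
 * Returns the position of the given graphics queue in the
 * adev->gfx.me.queue_bitmap.
 */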
int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
			       int me, int pipe, int queue)
{
	int bit = 0;

	bit += me * adev->gfx.me.num_pipe_per_me
		* adev->gfx.me.num_queue_per_pipe;
	bit += pipe * adev->gfx.me.num_queue_per_pipe;
	bit += queue;

	return bit;
}

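/**
 * amdgpu_gfx_bit_to_me_queue - Convert a flat bit index back to ME/pipe/queue
 *
 * @adev: amdgpu_device pointer
 * @bit: position in the adev->gfx.me.queue_bitmap
 * @me: returned ME index
 * @pipe: returned pipe index
 * @queue: returned queue index
 *
 * The inverse of amdgpu_gfx_me_queue_to_bit().
 */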
void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
				int *me, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.me.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.me.num_queue_per_pipe)
		% adev->gfx.me.num_pipe_per_me;
	*me = (bit / adev->gfx.me.num_queue_per_pipe)
		/ adev->gfx.me.num_pipe_per_me;
}

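/**
 * amdgpu_gfx_is_me_queue_enabled - Check whether a graphics queue is owned by amdgpu
 *
 * @adev: amdgpu_device pointer
 * @me: ME index
 * @pipe: pipe index within the ME
 * @queue: queue index within the pipe
 *
 * Returns true if the queue is set in adev->gfx.me.queue_bitmap.
 */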
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
				    int me, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
			adev->gfx.me.queue_bitmap);
}

/**
 * amdgpu_gfx_scratch_get - Allocate a scratch register
 *
 * @adev: amdgpu_device pointer
 * @reg: pointer filled with the mmio offset of the allocated scratch register
 *
 * Allocate a CP scratch register for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg)
{
	int i;

	i = ffs(adev->gfx.scratch.free_mask);
	if (i != 0 && i <= adev->gfx.scratch.num_reg) {
		i--;
		adev->gfx.scratch.free_mask &= ~(1u << i);
		*reg = adev->gfx.scratch.reg_base + i;
		return 0;
	}
	return -EINVAL;
}

/**
 * amdgpu_gfx_scratch_free - Free a scratch register
 *
 * @adev: amdgpu_device pointer
 * @reg: scratch register mmio offset
 *
 * Free a CP scratch register allocated for use by the driver (all asics).
 */
void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg)
{
	adev->gfx.scratch.free_mask |= 1u << (reg - adev->gfx.scratch.reg_base);
}

/**
 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
 *
 * @mask: array in which the per-shader array disable masks will be stored
 * @max_se: number of SEs
 * @max_sh: number of SHs
 *
 * The bitmask of CUs to be disabled in the shader array determined by se and
 * sh is stored in mask[se * max_sh + sh].
 */
void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
{
	unsigned se, sh, cu;
	const char *p;

	memset(mask, 0, sizeof(*mask) * max_se * max_sh);

	if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
		return;

	p = amdgpu_disable_cu;
	for (;;) {
		char *next;
		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
		if (ret < 3) {
			DRM_ERROR("amdgpu: could not parse disable_cu\n");
			return;
		}

		if (se < max_se && sh < max_sh && cu < 16) {
			DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
			mask[se * max_sh + sh] |= 1u << cu;
		} else {
			DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
				  se, sh, cu);
		}

		next = strchr(p, ',');
		if (!next)
			break;
		p = next + 1;
	}
}

static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
{
	if (amdgpu_compute_multipipe != -1) {
		DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
			 amdgpu_compute_multipipe);
		return amdgpu_compute_multipipe == 1;
	}

	/* FIXME: spreading the queues across pipes causes perf regressions
	 * on POLARIS11 compute workloads */
	if (adev->asic_type == CHIP_POLARIS11)
		return false;

	return adev->gfx.mec.num_mec > 1;
}

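/**
 * amdgpu_gfx_compute_queue_acquire - Select the compute queues owned by amdgpu
 *
 * @adev: amdgpu_device pointer
 *
 * Applies the compute queue ownership policy, marks the selected queues in
 * adev->gfx.mec.queue_bitmap and updates adev->gfx.num_compute_rings
 * accordingly.
 */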
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, pipe, mec;
	bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);

	/* policy for amdgpu compute queue ownership */
	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		queue = i % adev->gfx.mec.num_queue_per_pipe;
		pipe = (i / adev->gfx.mec.num_queue_per_pipe)
			% adev->gfx.mec.num_pipe_per_mec;
		mec = (i / adev->gfx.mec.num_queue_per_pipe)
			/ adev->gfx.mec.num_pipe_per_mec;

		/* we've run out of HW */
		if (mec >= adev->gfx.mec.num_mec)
			break;

		if (multipipe_policy) {
			/* policy: amdgpu owns the first two queues of the first MEC */
			if (mec == 0 && queue < 2)
				set_bit(i, adev->gfx.mec.queue_bitmap);
		} else {
			/* policy: amdgpu owns all queues in the first pipe */
			if (mec == 0 && pipe == 0)
				set_bit(i, adev->gfx.mec.queue_bitmap);
		}
	}

	/* update the number of active compute rings */
	adev->gfx.num_compute_rings =
		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* If you hit this case and edited the policy, you probably just
	 * need to increase AMDGPU_MAX_COMPUTE_RINGS */
	if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
		adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
}

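/**
 * amdgpu_gfx_graphics_queue_acquire - Select the graphics queues owned by amdgpu
 *
 * @adev: amdgpu_device pointer
 *
 * Marks the graphics queues owned by amdgpu in adev->gfx.me.queue_bitmap and
 * updates adev->gfx.num_gfx_rings accordingly.
 */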
void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, me;

	for (i = 0; i < AMDGPU_MAX_GFX_QUEUES; ++i) {
		queue = i % adev->gfx.me.num_queue_per_pipe;
		me = (i / adev->gfx.me.num_queue_per_pipe)
		      / adev->gfx.me.num_pipe_per_me;

		if (me >= adev->gfx.me.num_me)
			break;
		/* policy: amdgpu owns the first queue per pipe at this stage;
		 * this will be extended to multiple queues per pipe later */
		if (me == 0 && queue < 1)
			set_bit(i, adev->gfx.me.queue_bitmap);
	}

	/* update the number of active graphics rings */
	adev->gfx.num_gfx_rings =
		bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
}

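/*
 * amdgpu_gfx_kiq_acquire - Pick a free compute queue for the KIQ.
 *
 * Walks the compute queue bitmap from the highest bit down and picks the
 * first queue that is not already owned by amdgpu, then stores the chosen
 * mec/pipe/queue in the KIQ ring.  Returns 0 on success or -EINVAL if no
 * suitable queue is found.
 */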
static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring)
{
	int queue_bit;
	int mec, pipe, queue;

	queue_bit = adev->gfx.mec.num_mec
		    * adev->gfx.mec.num_pipe_per_mec
		    * adev->gfx.mec.num_queue_per_pipe;

	while (--queue_bit >= 0) {
		if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
			continue;

		amdgpu_gfx_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

		/*
		 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
		 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
		 *    can only be issued on queue 0.
		 */
		if ((mec == 1 && pipe > 1) || queue != 0)
			continue;

		ring->me = mec + 1;
		ring->pipe = pipe;
		ring->queue = queue;

		return 0;
	}

	dev_err(adev->dev, "Failed to find a queue for KIQ\n");
	return -EINVAL;
}

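/**
 * amdgpu_gfx_kiq_init_ring - Initialize the KIQ (kernel interface queue) ring
 *
 * @adev: amdgpu_device pointer
 * @ring: KIQ ring to initialize
 * @irq: interrupt source used by the ring
 *
 * Acquires a free compute queue for the KIQ, sets up its doorbell and EOP
 * address and initializes the ring itself.
 * Returns 0 on success, error on failure.
 */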
int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring,
			     struct amdgpu_irq_src *irq)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	int r = 0;

	spin_lock_init(&kiq->ring_lock);

	r = amdgpu_device_wb_get(adev, &adev->virt.reg_val_offs);
	if (r)
		return r;

	ring->adev = NULL;
	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = adev->doorbell_index.kiq;

	r = amdgpu_gfx_kiq_acquire(adev, ring);
	if (r)
		return r;

	ring->eop_gpu_addr = kiq->eop_gpu_addr;
	sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
	r = amdgpu_ring_init(adev, ring, 1024,
			     irq, AMDGPU_CP_KIQ_IRQ_DRIVER0);
	if (r)
		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);

	return r;
}

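/**
 * amdgpu_gfx_kiq_free_ring - Tear down the KIQ ring
 *
 * @ring: KIQ ring to free
 * @irq: interrupt source used by the ring
 */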
void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
			      struct amdgpu_irq_src *irq)
{
	amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
	amdgpu_ring_fini(ring);
}

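/**
 * amdgpu_gfx_kiq_fini - Free the KIQ EOP buffer object
 *
 * @adev: amdgpu_device pointer
 */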
void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
}

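/**
 * amdgpu_gfx_kiq_init - Allocate and clear the KIQ EOP buffer
 *
 * @adev: amdgpu_device pointer
 * @hpd_size: size of the EOP buffer in bytes
 *
 * Creates the buffer object backing the KIQ EOP area in GTT, clears it and
 * unmaps it again.
 * Returns 0 on success, error on failure.
 */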
int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
			unsigned hpd_size)
{
	int r;
	u32 *hpd;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
				    &kiq->eop_gpu_addr, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
		return r;
	}

	memset(hpd, 0, hpd_size);

	r = amdgpu_bo_reserve(kiq->eop_obj, true);
	if (unlikely(r != 0))
		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
	amdgpu_bo_kunmap(kiq->eop_obj);
	amdgpu_bo_unreserve(kiq->eop_obj);

	return 0;
}

/* create MQD for each compute/gfx queue */
int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
			   unsigned mqd_size)
{
	struct amdgpu_ring *ring = NULL;
	int r, i;

	/* create MQD for KIQ */
	ring = &adev->gfx.kiq.ring;
	if (!ring->mqd_obj) {
		/* Originally the KIQ MQD was placed in the GTT domain, but for
		 * SRIOV the VRAM domain is a must: otherwise the hypervisor's
		 * SAVE_VF fails after the driver is unloaded, because by then
		 * the MQD has been deallocated and unbound from the GART.  To
		 * avoid that divergence, use the VRAM domain for the KIQ MQD
		 * on both SRIOV and bare-metal.
		 */
		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
					    &ring->mqd_gpu_addr, &ring->mqd_ptr);
		if (r) {
			dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
			return r;
		}

		/* prepare MQD backup */
		adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL);
		if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
			dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
	}

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		/* create MQD for each KGQ */
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			if (!ring->mqd_obj) {
				r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
							    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
							    &ring->mqd_gpu_addr, &ring->mqd_ptr);
				if (r) {
					dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
					return r;
				}

				/* prepare MQD backup */
				adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
				if (!adev->gfx.me.mqd_backup[i])
					dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
			}
		}
	}

	/* create MQD for each KCQ */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		if (!ring->mqd_obj) {
			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
			if (r) {
				dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
				return r;
			}

			/* prepare MQD backup */
			adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
			if (!adev->gfx.mec.mqd_backup[i])
				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
		}
	}

	return 0;
}

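/**
 * amdgpu_gfx_mqd_sw_fini - Free the MQDs and their backups
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the MQD buffer objects and backup allocations created by
 * amdgpu_gfx_mqd_sw_init() for the gfx, compute and KIQ rings.
 */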
void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int i;

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			kfree(adev->gfx.me.mqd_backup[i]);
			amdgpu_bo_free_kernel(&ring->mqd_obj,
					      &ring->mqd_gpu_addr,
					      &ring->mqd_ptr);
		}
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		kfree(adev->gfx.mec.mqd_backup[i]);
		amdgpu_bo_free_kernel(&ring->mqd_obj,
				      &ring->mqd_gpu_addr,
				      &ring->mqd_ptr);
	}

	ring = &adev->gfx.kiq.ring;
	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring)
		kfree(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS]);
	kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
	amdgpu_bo_free_kernel(&ring->mqd_obj,
			      &ring->mqd_gpu_addr,
			      &ring->mqd_ptr);
}

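/**
 * amdgpu_gfx_disable_kcq - Disable the kernel compute queues (KCQs)
 *
 * @adev: amdgpu_device pointer
 *
 * Uses the KIQ to unmap all kernel compute queues.
 * Returns 0 on success, error on failure.
 */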
int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int i;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
					adev->gfx.num_compute_rings))
		return -ENOMEM;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
					   RESET_QUEUES, 0, 0);

	return amdgpu_ring_test_ring(kiq_ring);
}

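/**
 * amdgpu_gfx_enable_kcq - Enable the kernel compute queues (KCQs)
 *
 * @adev: amdgpu_device pointer
 *
 * Builds the queue mask of amdgpu-owned compute queues and uses the KIQ to
 * set the resources and map every kernel compute queue.
 * Returns 0 on success, error on failure.
 */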
int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	uint64_t queue_mask = 0;
	int r, i;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
		return -EINVAL;

	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		if (!test_bit(i, adev->gfx.mec.queue_bitmap))
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of queue_mask needs updating */
		if (WARN_ON(i >= (sizeof(queue_mask) * 8))) {
			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
			break;
		}

		queue_mask |= (1ull << i);
	}

	DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
							kiq_ring->queue);

	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
					adev->gfx.num_compute_rings +
					kiq->pmf->set_resources_size);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		return r;
	}

	kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);

	r = amdgpu_ring_test_helper(kiq_ring);
	if (r)
		DRM_ERROR("KCQ enable failed\n");

	return r;
}

/**
 * amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
 *
 * @adev: amdgpu_device pointer
 * @enable: true to enable the gfx off feature, false to disable it
 *
 * 1. The gfx off feature will be enabled by the gfx ip after gfx cg/pg is enabled.
 * 2. Other clients can send a request to disable the gfx off feature; the request must be honored.
 * 3. Other clients can cancel their request to disable the gfx off feature.
 * 4. Other clients should not send a request to enable the gfx off feature before disabling it.
 */
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
		return;

	if (!is_support_sw_smu(adev) &&
	    (!adev->powerplay.pp_funcs ||
	     !adev->powerplay.pp_funcs->set_powergating_by_smu))
		return;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	if (!enable)
		adev->gfx.gfx_off_req_count++;
	else if (adev->gfx.gfx_off_req_count > 0)
		adev->gfx.gfx_off_req_count--;

	if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
		schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
	} else if (!enable && adev->gfx.gfx_off_state) {
		if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false))
			adev->gfx.gfx_off_state = false;
	}

	mutex_unlock(&adev->gfx.gfx_off_mutex);
}

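/**
 * amdgpu_gfx_ras_late_init - Late init for gfx RAS handling
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates the gfx ras_if structure if needed, registers the RAS
 * sysfs/debugfs entries and interrupt handler, and enables the CP ECC error
 * interrupt when RAS is supported.  If RAS is not supported, the ras_if
 * structure is freed again.
 * Returns 0 on success, error on failure.
 */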
int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
{
	int r;
	struct ras_fs_if fs_info = {
		.sysfs_name = "gfx_err_count",
		.debugfs_name = "gfx_err_inject",
	};
	struct ras_ih_if ih_info = {
		.cb = amdgpu_gfx_process_ras_data_cb,
	};

	if (!adev->gfx.ras_if) {
		adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
		if (!adev->gfx.ras_if)
			return -ENOMEM;
		adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX;
		adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->gfx.ras_if->sub_block_index = 0;
		strcpy(adev->gfx.ras_if->name, "gfx");
	}
	fs_info.head = ih_info.head = *adev->gfx.ras_if;

	r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,
				 &fs_info, &ih_info);
	if (r)
		goto free;

	if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) {
		r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
		if (r)
			goto late_fini;
	} else {
		/* free gfx ras_if if ras is not supported */
		r = 0;
		goto free;
	}

	return 0;
late_fini:
	amdgpu_ras_late_fini(adev, adev->gfx.ras_if, &ih_info);
free:
	kfree(adev->gfx.ras_if);
	adev->gfx.ras_if = NULL;
	return r;
}

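/**
 * amdgpu_gfx_ras_fini - Tear down gfx RAS handling
 *
 * @adev: amdgpu_device pointer
 *
 * Unregisters the gfx RAS handling set up by amdgpu_gfx_ras_late_init() and
 * frees the ras_if structure.
 */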
void amdgpu_gfx_ras_fini(struct amdgpu_device *adev)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
			adev->gfx.ras_if) {
		struct ras_common_if *ras_if = adev->gfx.ras_if;
		struct ras_ih_if ih_info = {
			.head = *ras_if,
			.cb = amdgpu_gfx_process_ras_data_cb,
		};

		amdgpu_ras_late_fini(adev, ras_if, &ih_info);
		kfree(ras_if);
	}
}

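/**
 * amdgpu_gfx_process_ras_data_cb - RAS callback for gfx uncorrectable errors
 *
 * @adev: amdgpu_device pointer
 * @err_data: error data to be filled by the error count query
 * @entry: interrupt vector entry
 *
 * If RAS is not enabled for the gfx block, flags the SRAM ECC error to the
 * KFD, queries the gfx error counters and triggers a GPU reset.
 */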
int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
		void *err_data,
		struct amdgpu_iv_entry *entry)
{
	/* TODO: a UE (uncorrectable error) will trigger an interrupt.
	 *
	 * When "Full RAS" is enabled, the per-IP interrupt sources should
	 * be disabled and the driver should only look for the aggregated
	 * interrupt via sync flood.
	 */
	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
		if (adev->gfx.funcs->query_ras_error_count)
			adev->gfx.funcs->query_ras_error_count(adev, err_data);
		amdgpu_ras_reset_gpu(adev, 0);
	}
	return AMDGPU_RAS_SUCCESS;
}

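/**
 * amdgpu_gfx_cp_ecc_error_irq - CP ECC error interrupt handler
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Dispatches the CP ECC error to the RAS interrupt handler.
 */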
int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->gfx.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	DRM_ERROR("CP ECC ERROR IRQ\n");
	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}