/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
#include "amdgpu_ras.h"

/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE         msecs_to_jiffies(100)

#define GFX_OFF_NO_DELAY 0

/*
 * GPU GFX IP block helper functions.
 */

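/**
 * amdgpu_gfx_mec_queue_to_bit - Map a MEC queue to a flat bit index
 *
 * @adev: amdgpu_device pointer
 * @mec: MEC index
 * @pipe: pipe index within the MEC
 * @queue: queue index within the pipe
 *
 * Returns the position of the (mec, pipe, queue) triple in the global
 * compute queue bitmap, i.e.
 * (mec * num_pipe_per_mec + pipe) * num_queue_per_pipe + queue.
 */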
int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
				int pipe, int queue)
{
	int bit = 0;

	bit += mec * adev->gfx.mec.num_pipe_per_mec
		* adev->gfx.mec.num_queue_per_pipe;
	bit += pipe * adev->gfx.mec.num_queue_per_pipe;
	bit += queue;

	return bit;
}

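/**
 * amdgpu_queue_mask_bit_to_mec_queue - Inverse of amdgpu_gfx_mec_queue_to_bit
 *
 * @adev: amdgpu_device pointer
 * @bit: flat bit index in the compute queue bitmap
 * @mec: returns the MEC index
 * @pipe: returns the pipe index within the MEC
 * @queue: returns the queue index within the pipe
 *
 * Decomposes a flat bit index back into its (mec, pipe, queue) triple.
 */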
void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
				 int *mec, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.mec.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
		% adev->gfx.mec.num_pipe_per_mec;
	*mec = (bit / adev->gfx.mec.num_queue_per_pipe)
	       / adev->gfx.mec.num_pipe_per_mec;
}

bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
				     int mec, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
			adev->gfx.mec.queue_bitmap);
}

int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
			       int me, int pipe, int queue)
{
	int bit = 0;

	bit += me * adev->gfx.me.num_pipe_per_me
		* adev->gfx.me.num_queue_per_pipe;
	bit += pipe * adev->gfx.me.num_queue_per_pipe;
	bit += queue;

	return bit;
}

void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
				int *me, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.me.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.me.num_queue_per_pipe)
		% adev->gfx.me.num_pipe_per_me;
	*me = (bit / adev->gfx.me.num_queue_per_pipe)
		/ adev->gfx.me.num_pipe_per_me;
}

bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
				    int me, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
			adev->gfx.me.queue_bitmap);
}

/**
 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
 *
 * @mask: array in which the per-shader array disable masks will be stored
 * @max_se: number of SEs
 * @max_sh: number of SHs
 *
 * The bitmask of CUs to be disabled in the shader array determined by se and
 * sh is stored in mask[se * max_sh + sh].
 */
void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
{
	unsigned se, sh, cu;
	const char *p;

	memset(mask, 0, sizeof(*mask) * max_se * max_sh);

	if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
		return;

	p = amdgpu_disable_cu;
	for (;;) {
		char *next;
		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
		if (ret < 3) {
			DRM_ERROR("amdgpu: could not parse disable_cu\n");
			return;
		}

		if (se < max_se && sh < max_sh && cu < 16) {
			DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
			mask[se * max_sh + sh] |= 1u << cu;
		} else {
			DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
				  se, sh, cu);
		}

		next = strchr(p, ',');
		if (!next)
			break;
		p = next + 1;
	}
}

static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
{
	return amdgpu_async_gfx_ring && adev->gfx.me.num_pipe_per_me > 1;
}

static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
{
	if (amdgpu_compute_multipipe != -1) {
		DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
			 amdgpu_compute_multipipe);
		return amdgpu_compute_multipipe == 1;
	}

	/* FIXME: spreading the queues across pipes causes perf regressions
	 * on POLARIS11 compute workloads */
	if (adev->asic_type == CHIP_POLARIS11)
		return false;

	return adev->gfx.mec.num_mec > 1;
}

bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
						struct amdgpu_ring *ring)
{
	int queue = ring->queue;
	int pipe = ring->pipe;

	/* Policy: use pipe1 queue0 as high priority graphics queue if we
	 * have more than one gfx pipe.
	 */
	if (amdgpu_gfx_is_graphics_multipipe_capable(adev) &&
	    adev->gfx.num_gfx_rings > 1 && pipe == 1 && queue == 0) {
		int me = ring->me;
		int bit;

		bit = amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue);
		if (ring == &adev->gfx.gfx_ring[bit])
			return true;
	}

	return false;
}

bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
					       struct amdgpu_ring *ring)
{
	/* Policy: use 1st queue as high priority compute queue if we
	 * have more than one compute queue.
	 */
	if (adev->gfx.num_compute_rings > 1 &&
	    ring == &adev->gfx.compute_ring[0])
		return true;

	return false;
}

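/**
 * amdgpu_gfx_compute_queue_acquire - Select the compute queues amdgpu will use
 *
 * @adev: amdgpu_device pointer
 *
 * Marks the compute (MEC) queues owned by amdgpu in adev->gfx.mec.queue_bitmap.
 * With the multipipe policy the queues are spread across the pipes of MEC1;
 * otherwise the first queues of the bitmap (up to num_compute_rings) are taken.
 */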
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, pipe;
	bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev);
	int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
				     adev->gfx.mec.num_queue_per_pipe,
				     adev->gfx.num_compute_rings);

	if (multipipe_policy) {
		/* policy: spread the queues evenly across all pipes on MEC1 only */
		for (i = 0; i < max_queues_per_mec; i++) {
			pipe = i % adev->gfx.mec.num_pipe_per_mec;
			queue = (i / adev->gfx.mec.num_pipe_per_mec) %
				adev->gfx.mec.num_queue_per_pipe;

			set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
					adev->gfx.mec.queue_bitmap);
		}
	} else {
		/* policy: amdgpu owns all queues in the given pipe */
		for (i = 0; i < max_queues_per_mec; ++i)
			set_bit(i, adev->gfx.mec.queue_bitmap);
	}

	dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
}

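/**
 * amdgpu_gfx_graphics_queue_acquire - Select the graphics queues amdgpu will use
 *
 * @adev: amdgpu_device pointer
 *
 * Marks the graphics (ME) queues owned by amdgpu in adev->gfx.me.queue_bitmap
 * and updates adev->gfx.num_gfx_rings to the number of queues acquired.
 */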
void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, pipe;
	bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
	int max_queues_per_me = adev->gfx.me.num_pipe_per_me *
					adev->gfx.me.num_queue_per_pipe;

	if (multipipe_policy) {
		/* policy: amdgpu owns the first queue per pipe at this stage;
		 * this will be extended to multiple queues per pipe later
		 */
		for (i = 0; i < max_queues_per_me; i++) {
			pipe = i % adev->gfx.me.num_pipe_per_me;
			queue = (i / adev->gfx.me.num_pipe_per_me) %
				adev->gfx.me.num_queue_per_pipe;

			set_bit(pipe * adev->gfx.me.num_queue_per_pipe + queue,
				adev->gfx.me.queue_bitmap);
		}
	} else {
		for (i = 0; i < max_queues_per_me; ++i)
			set_bit(i, adev->gfx.me.queue_bitmap);
	}

	/* update the number of active graphics rings */
	adev->gfx.num_gfx_rings =
		bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
}

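/*
 * amdgpu_gfx_kiq_acquire - Find a free compute queue for the KIQ
 *
 * Walks the compute queue space from the top and picks the first queue that
 * is not already owned by amdgpu (i.e. not set in mec.queue_bitmap) and that
 * satisfies the restrictions noted below, then stores the selection in the
 * KIQ ring.
 */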
static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring)
{
	int queue_bit;
	int mec, pipe, queue;

	queue_bit = adev->gfx.mec.num_mec
		    * adev->gfx.mec.num_pipe_per_mec
		    * adev->gfx.mec.num_queue_per_pipe;

	while (--queue_bit >= 0) {
		if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
			continue;

		amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

		/*
		 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
		 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
		 *    can only be issued on queue 0.
		 */
		if ((mec == 1 && pipe > 1) || queue != 0)
			continue;

		ring->me = mec + 1;
		ring->pipe = pipe;
		ring->queue = queue;

		return 0;
	}

	dev_err(adev->dev, "Failed to find a queue for KIQ\n");
	return -EINVAL;
}

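/**
 * amdgpu_gfx_kiq_init_ring - Initialize the KIQ ring
 *
 * @adev: amdgpu_device pointer
 * @ring: KIQ ring to initialize
 * @irq: interrupt source used by the ring
 *
 * Picks a free compute queue for the KIQ, sets up its doorbell and EOP
 * address and initializes the ring itself.
 */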
int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring,
			     struct amdgpu_irq_src *irq)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	int r = 0;

	spin_lock_init(&kiq->ring_lock);

	ring->adev = NULL;
	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = adev->doorbell_index.kiq;

	r = amdgpu_gfx_kiq_acquire(adev, ring);
	if (r)
		return r;

	ring->eop_gpu_addr = kiq->eop_gpu_addr;
	ring->no_scheduler = true;
	sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
	r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);

	return r;
}

void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
	amdgpu_ring_fini(ring);
}

void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
}

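/**
 * amdgpu_gfx_kiq_init - Allocate the KIQ EOP buffer
 *
 * @adev: amdgpu_device pointer
 * @hpd_size: size of the EOP (hpd) buffer in bytes
 *
 * Allocates and zeroes the buffer object used for the KIQ EOP area.
 */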
int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
			unsigned hpd_size)
{
	int r;
	u32 *hpd;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
				    &kiq->eop_gpu_addr, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
		return r;
	}

	memset(hpd, 0, hpd_size);

	r = amdgpu_bo_reserve(kiq->eop_obj, true);
	if (unlikely(r != 0))
		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
	amdgpu_bo_kunmap(kiq->eop_obj);
	amdgpu_bo_unreserve(kiq->eop_obj);

	return 0;
}

/* create MQD for each compute/gfx queue */
int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
			   unsigned mqd_size)
{
	struct amdgpu_ring *ring = NULL;
	int r, i;

	/* create MQD for KIQ */
	ring = &adev->gfx.kiq.ring;
	if (!adev->enable_mes_kiq && !ring->mqd_obj) {
		/* Originally the KIQ MQD was placed in the GTT domain, but for
		 * SRIOV the VRAM domain is a must; otherwise the hypervisor
		 * triggers a SAVE_VF failure after the driver is unloaded,
		 * because by then the MQD has been deallocated and unbound from
		 * the GART. To avoid this divergence we use the VRAM domain for
		 * the KIQ MQD on both SRIOV and bare-metal.
		 */
		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
					    &ring->mqd_gpu_addr, &ring->mqd_ptr);
		if (r) {
			dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
			return r;
		}

		/* prepare MQD backup */
		adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL);
		if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
			dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
	}

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		/* create MQD for each KGQ */
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			if (!ring->mqd_obj) {
				r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
							    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
							    &ring->mqd_gpu_addr, &ring->mqd_ptr);
				if (r) {
					dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
					return r;
				}

				/* prepare MQD backup */
				adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
				if (!adev->gfx.me.mqd_backup[i])
					dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
			}
		}
	}

	/* create MQD for each KCQ */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		if (!ring->mqd_obj) {
			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
			if (r) {
				dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
				return r;
			}

			/* prepare MQD backup */
			adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
			if (!adev->gfx.mec.mqd_backup[i])
				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
		}
	}

	return 0;
}

void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int i;

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			kfree(adev->gfx.me.mqd_backup[i]);
			amdgpu_bo_free_kernel(&ring->mqd_obj,
					      &ring->mqd_gpu_addr,
					      &ring->mqd_ptr);
		}
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		kfree(adev->gfx.mec.mqd_backup[i]);
		amdgpu_bo_free_kernel(&ring->mqd_obj,
				      &ring->mqd_gpu_addr,
				      &ring->mqd_ptr);
	}

	ring = &adev->gfx.kiq.ring;
	kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
	amdgpu_bo_free_kernel(&ring->mqd_obj,
			      &ring->mqd_gpu_addr,
			      &ring->mqd_ptr);
}

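/**
 * amdgpu_gfx_disable_kcq - Unmap the kernel compute queues
 *
 * @adev: amdgpu_device pointer
 *
 * Asks the KIQ to unmap (reset) every kernel compute queue and, if the KIQ
 * ring is still operational, waits for it to process the request.
 */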
int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int i, r = 0;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	spin_lock(&adev->gfx.kiq.ring_lock);
	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
					adev->gfx.num_compute_rings)) {
		spin_unlock(&adev->gfx.kiq.ring_lock);
		return -ENOMEM;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
					   RESET_QUEUES, 0, 0);

	if (adev->gfx.kiq.ring.sched.ready)
		r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&adev->gfx.kiq.ring_lock);

	return r;
}

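/*
 * amdgpu_queue_mask_bit_to_set_resource_bit - Convert a driver queue bitmap
 * bit into the bit position used in the KIQ SET_RESOURCES queue mask, which
 * assumes a fixed layout of 4 pipes x 8 queues per MEC
 * (mec * 32 + pipe * 8 + queue).
 */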
int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					int queue_bit)
{
	int mec, pipe, queue;
	int set_resource_bit = 0;

	amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

	set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;

	return set_resource_bit;
}

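/**
 * amdgpu_gfx_enable_kcq - Map the kernel compute queues
 *
 * @adev: amdgpu_device pointer
 *
 * Builds the queue mask from the acquired compute queues, issues a
 * set_resources packet followed by a map_queues packet for each kernel
 * compute ring on the KIQ, and waits for the KIQ to process them.
 */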
int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	uint64_t queue_mask = 0;
	int r, i;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
		return -EINVAL;

	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		if (!test_bit(i, adev->gfx.mec.queue_bitmap))
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of queue_mask needs updating.
		 */
		if (WARN_ON(i >= (sizeof(queue_mask) * 8))) {
			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
			break;
		}

		queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
	}

	DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
							kiq_ring->queue);
	spin_lock(&adev->gfx.kiq.ring_lock);
	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
					adev->gfx.num_compute_rings +
					kiq->pmf->set_resources_size);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		return r;
	}

	if (adev->enable_mes)
		queue_mask = ~0ULL;

	kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);

	r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&adev->gfx.kiq.ring_lock);
	if (r)
		DRM_ERROR("KCQ enable failed\n");

	return r;
}

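/*
 * Typical usage (illustrative): a client that needs the GFX block to stay
 * powered sends a disable request around its access and cancels it when done:
 *
 *	amdgpu_gfx_off_ctrl(adev, false);	// request GFXOFF to stay disabled
 *	...access GFX registers or submit work...
 *	amdgpu_gfx_off_ctrl(adev, true);	// cancel the disable request
 */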
/**
 * amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
 *
 * @adev: amdgpu_device pointer
 * @enable: true to enable the gfx off feature, false to disable it
 *
 * 1. The gfx off feature will be enabled by the gfx ip block after gfx CG/PG is enabled.
 * 2. Other clients can send a request to disable the gfx off feature; such requests should be honored.
 * 3. Other clients can cancel their request to disable the gfx off feature.
 * 4. Other clients should not send a request to enable the gfx off feature before disabling it.
 */

void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
	unsigned long delay = GFX_OFF_DELAY_ENABLE;

	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
		return;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	if (enable) {
		/* If the count is already 0, it means there's an imbalance bug somewhere.
		 * Note that the bug may be in a different caller than the one which triggers the
		 * WARN_ON_ONCE.
		 */
		if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
			goto unlock;

		adev->gfx.gfx_off_req_count--;

		if (adev->gfx.gfx_off_req_count == 0 &&
		    !adev->gfx.gfx_off_state) {
			/* If going to s2idle, no need to wait */
			if (adev->in_s0ix)
				delay = GFX_OFF_NO_DELAY;
			schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
					      delay);
		}
	} else {
		if (adev->gfx.gfx_off_req_count == 0) {
			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);

			if (adev->gfx.gfx_off_state &&
			    !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
				adev->gfx.gfx_off_state = false;

				if (adev->gfx.funcs->init_spm_golden) {
					dev_dbg(adev->dev,
						"GFXOFF is disabled, re-init SPM golden settings\n");
					amdgpu_gfx_init_spm_golden(adev);
				}
			}
		}

		adev->gfx.gfx_off_req_count++;
	}

unlock:
	mutex_unlock(&adev->gfx.gfx_off_mutex);
}

int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_get_status_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		if (!amdgpu_persistent_edc_harvesting_supported(adev))
			amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);

		r = amdgpu_ras_block_late_init(adev, ras_block);
		if (r)
			return r;

		r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
		if (r)
			goto late_fini;
	} else {
		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
	}

	return 0;
late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}

int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
		void *err_data,
		struct amdgpu_iv_entry *entry)
{
	/* TODO: a UE will trigger an interrupt.
	 *
	 * When "Full RAS" is enabled, the per-IP interrupt sources should
	 * be disabled and the driver should only look for the aggregated
	 * interrupt via sync flood.
	 */
	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
		if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops &&
		    adev->gfx.ras->ras_block.hw_ops->query_ras_error_count)
			adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
		amdgpu_ras_reset_gpu(adev);
	}
	return AMDGPU_RAS_SUCCESS;
}

int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->gfx.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	DRM_ERROR("CP ECC ERROR IRQ\n");
	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

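/*
 * amdgpu_kiq_rreg - Read a register through the KIQ
 *
 * Emits a read-register packet on the KIQ ring and polls the fence until the
 * value lands in a writeback slot; the request is routed through MES instead
 * when the MES ring is ready. Typically used where direct MMIO access to a
 * register is not possible, e.g. under SR-IOV.
 */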
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq, reg_val_offs = 0, value = 0;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (adev->mes.ring.sched.ready)
		return amdgpu_mes_rreg(adev, reg);

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
		pr_err("critical bug! too many kiq readers\n");
		goto failed_unlock;
	}
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* Don't wait any longer in the GPU reset case, because that may block
	 * the gpu_recover() routine forever: e.g. this kiq read may be
	 * triggered from TTM, and ttm_bo_lock_delayed_workqueue() will never
	 * return if we keep waiting here, which causes gpu_recover() to hang.
	 *
	 * Also don't wait any longer when called from IRQ context.
	 */
	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
		goto failed_kiq_read;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_read;

	mb();
	value = adev->wb.wb[reg_val_offs];
	amdgpu_device_wb_free(adev, reg_val_offs);
	return value;

failed_undo:
	amdgpu_ring_undo(ring);
failed_unlock:
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_read:
	if (reg_val_offs)
		amdgpu_device_wb_free(adev, reg_val_offs);
	dev_err(adev->dev, "failed to read reg:%x\n", reg);
	return ~0;
}

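/*
 * amdgpu_kiq_wreg - Write a register through the KIQ
 *
 * Counterpart of amdgpu_kiq_rreg(): emits a write-register packet on the KIQ
 * ring (or routes the write through MES when its ring is ready) and polls the
 * fence for completion.
 */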
void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (adev->mes.ring.sched.ready) {
		amdgpu_mes_wreg(adev, reg, v);
		return;
	}

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* Don't wait any longer in the GPU reset case, because that may block
	 * the gpu_recover() routine forever: e.g. this kiq write may be
	 * triggered from TTM, and ttm_bo_lock_delayed_workqueue() will never
	 * return if we keep waiting here, which causes gpu_recover() to hang.
	 *
	 * Also don't wait any longer when called from IRQ context.
	 */
	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
		goto failed_kiq_write;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_write;

	return;

failed_undo:
	amdgpu_ring_undo(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_write:
	dev_err(adev->dev, "failed to write reg:%x\n", reg);
}

int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
{
	if (amdgpu_num_kcq == -1) {
		return 8;
	} else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
		dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
		return 8;
	}
	return amdgpu_num_kcq;
}