/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"

/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)

static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
					unsigned int pasid, unsigned int vmid);

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
static int destroy_queues_cpsch(struct device_queue_manager *dqm,
				bool preempt_static_queues, bool lock);

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id);

static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA)
		return KFD_MQD_TYPE_SDMA;
	return KFD_MQD_TYPE_CP;
}

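/*
 * A pipe is usable by KFD if at least one of its queues is marked as
 * available in the queue bitmap shared with the KGD.
 */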
static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
	int i;
	int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
		+ pipe * dqm->dev->shared_resources.num_queue_per_pipe;

	/* queue is available for KFD usage if bit is 1 */
	for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
		if (test_bit(pipe_offset + i,
			      dqm->dev->shared_resources.queue_bitmap))
			return true;
	return false;
}

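/* Helpers exposing the MEC/pipe/queue topology reported by the KGD. */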
unsigned int get_mec_num(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm || !dqm->dev);

	return dqm->dev->shared_resources.num_mec;
}

unsigned int get_queues_num(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm || !dqm->dev);
	return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
				KGD_MAX_QUEUES);
}

unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm || !dqm->dev);
	return dqm->dev->shared_resources.num_queue_per_pipe;
}

unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm || !dqm->dev);
	return dqm->dev->shared_resources.num_pipe_per_mec;
}

void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	return dqm->dev->kfd2kgd->program_sh_mem_settings(
						dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}

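/*
 * Grab one of the VMIDs reserved for KFD, bind it to the process's PASID
 * and program the process's memory aperture settings for that VMID.
 */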
static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int bit, allocated_vmid;

	if (dqm->vmid_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap, CIK_VMID_NUM);
	clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap);

	/* On Kaveri, the VMIDs reserved for KFD start at VMID 8 */
	allocated_vmid = bit + KFD_VMID_START_OFFSET;
	pr_debug("kfd: vmid allocation %d\n", allocated_vmid);
	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
	program_sh_mem_settings(dqm, qpd);

	return 0;
}

static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int bit = qpd->vmid - KFD_VMID_START_OFFSET;

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);

	set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
	qpd->vmid = 0;
	q->properties.vmid = 0;
}

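/*
 * Queue creation path used when HW scheduling (HWS) is disabled: the
 * driver picks the HQD/SDMA slot itself and loads the MQD directly.
 */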
static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd,
				int *allocated_vmid)
{
	int retval;

	BUG_ON(!dqm || !q || !qpd || !allocated_vmid);

	pr_debug("kfd: In func %s\n", __func__);
	print_queue(q);

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		mutex_unlock(&dqm->lock);
		return -EPERM;
	}

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval != 0) {
			mutex_unlock(&dqm->lock);
			return retval;
		}
	}
	*allocated_vmid = qpd->vmid;
	q->properties.vmid = qpd->vmid;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		retval = create_compute_queue_nocpsch(dqm, q, qpd);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		retval = create_sdma_queue_nocpsch(dqm, q, qpd);
	else
		retval = -EINVAL;

	if (retval != 0) {
		if (list_empty(&qpd->queues_list)) {
			deallocate_vmid(dqm, qpd, q);
			*allocated_vmid = 0;
		}
		mutex_unlock(&dqm->lock);
		return retval;
	}

	list_add(&q->list, &qpd->queues_list);
	if (q->properties.is_active)
		dqm->queue_count++;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	mutex_unlock(&dqm->lock);
	return 0;
}

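/*
 * Pick a free HQD slot, scanning the enabled pipes round-robin starting
 * from the pipe after the one used for the previous allocation.
 */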
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit, i;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate, i = 0;
			i < get_pipes_per_mec(dqm);
			pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {

		if (!is_pipe_enabled(dqm, 0, pipe))
			continue;

		if (dqm->allocated_queues[pipe] != 0) {
			bit = find_first_bit(
				(unsigned long *)&dqm->allocated_queues[pipe],
				get_queues_per_pipe(dqm));

			clear_bit(bit,
				(unsigned long *)&dqm->allocated_queues[pipe]);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue (%d)\n",
				__func__, q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);

	return 0;
}

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]);
}

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !qpd);

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
	if (mqd == NULL)
		return -ENOMEM;

	retval = allocate_hqd(dqm, q);
	if (retval != 0)
		return retval;

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0) {
		deallocate_hqd(dqm, q);
		return retval;
	}

	pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n",
			q->pipe,
			q->queue);

	retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
			q->queue, (uint32_t __user *) q->properties.write_ptr);
	if (retval != 0) {
		deallocate_hqd(dqm, q);
		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
		return retval;
	}

	return 0;
}

static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !q->mqd || !qpd);

	retval = 0;

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
		if (mqd == NULL) {
			retval = -ENOMEM;
			goto out;
		}
		deallocate_hqd(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
		if (mqd == NULL) {
			retval = -ENOMEM;
			goto out;
		}
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	} else {
		pr_debug("q->properties.type is invalid (%d)\n",
				q->properties.type);
		retval = -EINVAL;
		goto out;
	}

	retval = mqd->destroy_mqd(mqd, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
				q->pipe, q->queue);

	if (retval != 0)
		goto out;

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list))
		deallocate_vmid(dqm, qpd, q);
	if (q->properties.is_active)
		dqm->queue_count--;

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}

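/*
 * Apply updated queue properties to the MQD and keep the active-queue
 * count in sync; under HWS the runlist is re-executed afterwards.
 */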
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	bool prev_active = false;

	BUG_ON(!dqm || !q || !q->mqd);

	mutex_lock(&dqm->lock);
	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (mqd == NULL) {
		mutex_unlock(&dqm->lock);
		return -ENOMEM;
	}

	if (q->properties.is_active)
		prev_active = true;

	/*
	 * check active state vs. the previous state
	 * and modify counter accordingly
	 */
	retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
	if ((q->properties.is_active) && (!prev_active))
		dqm->queue_count++;
	else if ((!q->properties.is_active) && (prev_active))
		dqm->queue_count--;

	if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = execute_queues_cpsch(dqm, false);

	mutex_unlock(&dqm->lock);
	return retval;
}

static struct mqd_manager *get_mqd_manager_nocpsch(
		struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
	struct mqd_manager *mqd;

	BUG_ON(!dqm || type >= KFD_MQD_TYPE_MAX);

	pr_debug("kfd: In func %s mqd type %d\n", __func__, type);

	mqd = dqm->mqds[type];
	if (!mqd) {
		mqd = mqd_manager_init(type, dqm->dev);
		if (mqd == NULL)
			pr_err("kfd: mqd manager is NULL\n");
		dqm->mqds[type] = mqd;
	}

	return mqd;
}

static int register_process_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;
	int retval;

	BUG_ON(!dqm || !qpd);

	pr_debug("kfd: In func %s\n", __func__);

	n = kzalloc(sizeof(struct device_process_node), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	mutex_lock(&dqm->lock);
	list_add(&n->list, &dqm->queues);

	retval = dqm->ops_asic_specific.register_process(dqm, qpd);

	dqm->processes_count++;

	mutex_unlock(&dqm->lock);

	return retval;
}

static int unregister_process_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	BUG_ON(!dqm || !qpd);

	pr_debug("In func %s\n", __func__);

	pr_debug("qpd->queues_list is %s\n",
			list_empty(&qpd->queues_list) ? "empty" : "not empty");

	retval = 0;
	mutex_lock(&dqm->lock);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	mutex_unlock(&dqm->lock);
	return retval;
}

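/*
 * Program the ATC PASID<->VMID mapping through the KGD. A pasid of 0
 * clears the mapping; otherwise the PASID is combined with the VALID
 * bit, e.g. (illustrative) pasid 0x42 is programmed as
 * 0x42 | ATC_VMID_PASID_MAPPING_VALID.
 */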
static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
			unsigned int vmid)
{
	uint32_t pasid_mapping;

	pasid_mapping = (pasid == 0) ? 0 :
		(uint32_t)pasid |
		ATC_VMID_PASID_MAPPING_VALID;

	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
						dqm->dev->kgd, pasid_mapping,
						vmid);
}

static void init_interrupts(struct device_queue_manager *dqm)
{
	unsigned int i;

	BUG_ON(dqm == NULL);

	for (i = 0; i < get_pipes_per_mec(dqm); i++)
		if (is_pipe_enabled(dqm, 0, i))
			dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
}

static int init_scheduler(struct device_queue_manager *dqm)
{
	int retval = 0;

	BUG_ON(!dqm);

	pr_debug("kfd: In %s\n", __func__);

	return retval;
}

static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int i;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s num of pipes: %d\n",
			__func__, get_pipes_per_mec(dqm));

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->sdma_queue_count = 0;
	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues) {
		mutex_destroy(&dqm->lock);
		return -ENOMEM;
	}

	for (i = 0; i < get_pipes_per_mec(dqm); i++)
		dqm->allocated_queues[i] = (1 << get_queues_per_pipe(dqm)) - 1;

	dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
	dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;

	init_scheduler(dqm);
	return 0;
}

static void uninitialize_nocpsch(struct device_queue_manager *dqm)
{
	int i;

	BUG_ON(!dqm);

	BUG_ON(dqm->queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0; i < KFD_MQD_TYPE_MAX; i++)
		kfree(dqm->mqds[i]);
	mutex_destroy(&dqm->lock);
	kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}

static int start_nocpsch(struct device_queue_manager *dqm)
{
	init_interrupts(dqm);
	return 0;
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	return 0;
}

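/*
 * SDMA queue slots are tracked in a small bitmap; allocation hands out
 * the first free bit and deallocation simply sets it again.
 */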
static int allocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int *sdma_queue_id)
{
	int bit;

	if (dqm->sdma_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->sdma_bitmap,
				CIK_SDMA_QUEUES);

	clear_bit(bit, (unsigned long *)&dqm->sdma_bitmap);
	*sdma_queue_id = bit;

	return 0;
}

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id)
{
	if (sdma_queue_id >= CIK_SDMA_QUEUES)
		return;
	set_bit(sdma_queue_id, (unsigned long *)&dqm->sdma_bitmap);
}

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd;
	int retval;

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
	if (!mqd)
		return -ENOMEM;

	retval = allocate_sdma_queue(dqm, &q->sdma_id);
	if (retval != 0)
		return retval;

	q->properties.sdma_queue_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
	q->properties.sdma_engine_id = q->sdma_id / CIK_SDMA_ENGINE_NUM;

	pr_debug("kfd: sdma id is:    %d\n", q->sdma_id);
	pr_debug("     sdma queue id: %d\n", q->properties.sdma_queue_id);
	pr_debug("     sdma engine id: %d\n", q->properties.sdma_engine_id);

	dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0) {
		deallocate_sdma_queue(dqm, q->sdma_id);
		return retval;
	}

	retval = mqd->load_mqd(mqd, q->mqd, 0, 0, NULL);
	if (retval != 0) {
		deallocate_sdma_queue(dqm, q->sdma_id);
		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
		return retval;
	}

	return 0;
}

/*
 * Device Queue Manager implementation for cp scheduler
 */

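/*
 * Report to the HW scheduler which resources it may use: the
 * KFD-reserved VMID range and the first-MEC queues that are enabled in
 * the shared queue bitmap.
 */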
static int set_sched_resources(struct device_queue_manager *dqm)
{
	int i, mec;
	struct scheduling_resources res;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s\n", __func__);

	res.vmid_mask = (1 << VMID_PER_DEVICE) - 1;
	res.vmid_mask <<= KFD_VMID_START_OFFSET;

	res.queue_mask = 0;
	for (i = 0; i < KGD_MAX_QUEUES; ++i) {
		mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
			/ dqm->dev->shared_resources.num_pipe_per_mec;

		if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
			continue;

		/* only acquire queues from the first MEC */
		if (mec > 0)
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of res.queue_mask needs updating
		 */
		if (WARN_ON(i >= (sizeof(res.queue_mask) * 8))) {
			pr_err("Invalid queue enabled by amdgpu: %d\n", i);
			break;
		}

		res.queue_mask |= (1ull << i);
	}
	res.gws_mask = res.oac_mask = res.gds_heap_base =
						res.gds_heap_size = 0;

	pr_debug("kfd: scheduling resources:\n"
			"      vmid mask: 0x%8X\n"
			"      queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}

static int initialize_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s num of pipes: %d\n",
			__func__, get_pipes_per_mec(dqm));

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->processes_count = 0;
	dqm->sdma_queue_count = 0;
	dqm->active_runlist = false;
	retval = dqm->ops_asic_specific.initialize(dqm);
	if (retval != 0)
		goto fail_init_pipelines;

	return 0;

fail_init_pipelines:
	mutex_destroy(&dqm->lock);
	return retval;
}

static int start_cpsch(struct device_queue_manager *dqm)
{
	struct device_process_node *node;
	int retval;

	BUG_ON(!dqm);

	retval = 0;

	retval = pm_init(&dqm->packets, dqm);
	if (retval != 0)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval != 0)
		goto fail_set_sched_resources;

	pr_debug("kfd: allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
					&dqm->fence_mem);

	if (retval != 0)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	init_interrupts(dqm);

	list_for_each_entry(node, &dqm->queues, list)
		if (node->qpd->pqm->process && dqm->dev)
			kfd_bind_process_to_device(dqm->dev,
						node->qpd->pqm->process);

	execute_queues_cpsch(dqm, true);

	return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets);
fail_packet_manager_init:
	return retval;
}

static int stop_cpsch(struct device_queue_manager *dqm)
{
	struct device_process_node *node;
	struct kfd_process_device *pdd;

	BUG_ON(!dqm);

	destroy_queues_cpsch(dqm, true, true);

	list_for_each_entry(node, &dqm->queues, list) {
		pdd = qpd_to_pdd(node->qpd);
		pdd->bound = false;
	}
	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	pm_uninit(&dqm->packets);

	return 0;
}

static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	BUG_ON(!dqm || !kq || !qpd);

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
				dqm->total_queue_count);
		mutex_unlock(&dqm->lock);
		return -EPERM;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	dqm->queue_count++;
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, false);
	mutex_unlock(&dqm->lock);

	return 0;
}

static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	BUG_ON(!dqm || !kq);

	pr_debug("kfd: In %s\n", __func__);

	mutex_lock(&dqm->lock);
	/* here we actually preempt the DIQ */
	destroy_queues_cpsch(dqm, true, false);
	list_del(&kq->list);
	dqm->queue_count--;
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, false);
	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	mutex_unlock(&dqm->lock);
}

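/*
 * Round-robin newly created SDMA queues across the two SDMA engines.
 * The static counter is only touched under dqm->lock (see
 * create_queue_cpsch), so no extra locking is needed here.
 */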
static void select_sdma_engine_id(struct queue *q)
{
	static int sdma_id;

	q->sdma_id = sdma_id;
	sdma_id = (sdma_id + 1) % 2;
}

static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd, int *allocate_vmid)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !qpd);

	retval = 0;

	if (allocate_vmid)
		*allocate_vmid = 0;

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		select_sdma_engine_id(q);

	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));

	if (mqd == NULL) {
		mutex_unlock(&dqm->lock);
		return -ENOMEM;
	}

	dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0)
		goto out;

	list_add(&q->list, &qpd->queues_list);
	if (q->properties.is_active) {
		dqm->queue_count++;
		retval = execute_queues_cpsch(dqm, false);
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}

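/*
 * Poll a fence location written by the CP until it reaches the expected
 * value, yielding the CPU between reads, or fail with -ETIME once the
 * timeout (in jiffies) has elapsed.
 */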
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned long timeout)
{
	BUG_ON(!fence_addr);
	timeout += jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, timeout)) {
			pr_err("kfd: qcm fence wait loop timeout expired\n");
			return -ETIME;
		}
		schedule();
	}

	return 0;
}

static int destroy_sdma_queues(struct device_queue_manager *dqm,
				unsigned int sdma_engine)
{
	return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
			KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES, 0, false,
			sdma_engine);
}

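/*
 * Preempt the currently mapped queues through the packet manager and
 * wait on a GART fence for the CP to acknowledge completion. On timeout
 * the process is flagged so its wavefronts get reset on teardown.
 */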
static int destroy_queues_cpsch(struct device_queue_manager *dqm,
				bool preempt_static_queues, bool lock)
{
	int retval;
	enum kfd_preempt_type_filter preempt_type;
	struct kfd_process_device *pdd;

	BUG_ON(!dqm);

	retval = 0;

	if (lock)
		mutex_lock(&dqm->lock);
	if (!dqm->active_runlist)
		goto out;

	pr_debug("kfd: Before destroying queues, sdma queue count is : %u\n",
		dqm->sdma_queue_count);

	if (dqm->sdma_queue_count > 0) {
		destroy_sdma_queues(dqm, 0);
		destroy_sdma_queues(dqm, 1);
	}

	preempt_type = preempt_static_queues ?
			KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES :
			KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES;

	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
			preempt_type, 0, false, 0);
	if (retval != 0)
		goto out;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
	/* wait for the fence to signal, bounded by the preemption timeout */
	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
	if (retval != 0) {
		pdd = kfd_get_process_device_data(dqm->dev,
				kfd_get_process(current));
		pdd->reset_wavefronts = true;
		goto out;
	}
	pm_release_ib(&dqm->packets);
	dqm->active_runlist = false;

out:
	if (lock)
		mutex_unlock(&dqm->lock);
	return retval;
}

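/*
 * Re-submit the runlist: preempt whatever is currently mapped, then, if
 * there are active queues and processes and no runlist is already
 * active, send a fresh runlist to the CP.
 */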
static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
{
	int retval;

	BUG_ON(!dqm);

	if (lock)
		mutex_lock(&dqm->lock);

	retval = destroy_queues_cpsch(dqm, false, false);
	if (retval != 0) {
		pr_err("kfd: the cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
		goto out;
	}

	if (dqm->queue_count <= 0 || dqm->processes_count <= 0) {
		retval = 0;
		goto out;
	}

	if (dqm->active_runlist) {
		retval = 0;
		goto out;
	}

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	if (retval != 0) {
		pr_err("kfd: failed to execute runlist\n");
		goto out;
	}
	dqm->active_runlist = true;

out:
	if (lock)
		mutex_unlock(&dqm->lock);
	return retval;
}

static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	bool preempt_all_queues;

	BUG_ON(!dqm || !qpd || !q);

	preempt_all_queues = false;

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	mutex_lock(&dqm->lock);

	if (qpd->is_debug) {
		/*
		 * error, currently we do not allow to destroy a queue
		 * of a currently debugged process
		 */
		retval = -EBUSY;
		goto failed_try_destroy_debugged_queue;

	}

	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto failed;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count--;

	list_del(&q->list);
	if (q->properties.is_active)
		dqm->queue_count--;

	execute_queues_cpsch(dqm, false);

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	mutex_unlock(&dqm->lock);

	return 0;

failed:
failed_try_destroy_debugged_queue:

	mutex_unlock(&dqm->lock);
	return retval;
}

/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF

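/*
 * Validate and program the APE1 alternate aperture. For example
 * (illustrative), base = 0x100000000 with size = 0x10000 yields
 * limit = 0x10000FFFF; both satisfy APE1_FIXED_BITS_MASK, so
 * sh_mem_ape1_base and sh_mem_ape1_limit are both programmed to 0x10000
 * (the values shifted right by 16). A size of 0 disables APE1 by
 * setting base > limit.
 */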
static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{
	bool retval;

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */

		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base)
			goto out;

		if ((base & APE1_FIXED_BITS_MASK) != 0)
			goto out;

		if ((limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT)
			goto out;

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	retval = dqm->ops_asic_specific.set_cache_memory_policy(
			dqm,
			qpd,
			default_policy,
			alternate_policy,
			alternate_aperture_base,
			alternate_aperture_size);

	if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

	pr_debug("kfd: sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		qpd->sh_mem_ape1_limit);

	mutex_unlock(&dqm->lock);
	return retval;

out:
	mutex_unlock(&dqm->lock);
	return false;
}

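/*
 * Allocate a device queue manager and wire up its ops table according
 * to the chosen scheduling policy (HWS vs. no-HWS), then hook in the
 * ASIC-specific callbacks and run the per-policy initialize step.
 */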
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
	struct device_queue_manager *dqm;

	BUG_ON(!dev);

	pr_debug("kfd: loading device queue manager\n");

	dqm = kzalloc(sizeof(struct device_queue_manager), GFP_KERNEL);
	if (!dqm)
		return NULL;

	dqm->dev = dev;
	switch (sched_policy) {
	case KFD_SCHED_POLICY_HWS:
	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
		/* initialize dqm for cp scheduling */
		dqm->ops.create_queue = create_queue_cpsch;
		dqm->ops.initialize = initialize_cpsch;
		dqm->ops.start = start_cpsch;
		dqm->ops.stop = stop_cpsch;
		dqm->ops.destroy_queue = destroy_queue_cpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
		dqm->ops.register_process = register_process_nocpsch;
		dqm->ops.unregister_process = unregister_process_nocpsch;
		dqm->ops.uninitialize = uninitialize_nocpsch;
		dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
		dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		break;
	case KFD_SCHED_POLICY_NO_HWS:
		/* initialize dqm for no cp scheduling */
		dqm->ops.start = start_nocpsch;
		dqm->ops.stop = stop_nocpsch;
		dqm->ops.create_queue = create_queue_nocpsch;
		dqm->ops.destroy_queue = destroy_queue_nocpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
		dqm->ops.register_process = register_process_nocpsch;
		dqm->ops.unregister_process = unregister_process_nocpsch;
		dqm->ops.initialize = initialize_nocpsch;
		dqm->ops.uninitialize = uninitialize_nocpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		break;
	default:
		BUG();
		break;
	}

	switch (dev->device_info->asic_family) {
	case CHIP_CARRIZO:
		device_queue_manager_init_vi(&dqm->ops_asic_specific);
		break;

	case CHIP_KAVERI:
		device_queue_manager_init_cik(&dqm->ops_asic_specific);
		break;
	}

	if (dqm->ops.initialize(dqm) != 0) {
		kfree(dqm);
		return NULL;
	}

	return dqm;
}

void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm);

	dqm->ops.uninitialize(dqm);
	kfree(dqm);
}