/* xref: /openbmc/linux/drivers/gpu/drm/amd/amdkfd/kfd_events.c (revision 28efb0046512e8a13ed9f9bdf0d68d10bbfbe9cf) */
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/memory.h>
#include "kfd_priv.h"
#include "kfd_events.h"
#include <linux/device.h>

/*
 * A task can only be on a single wait_queue at a time, but we need to support
 * waiting on multiple events (any/all).
 * Instead of each event simply having a wait_queue with sleeping tasks, it
 * has a list of tasks.
 * A thread that wants to sleep creates an array of these, one for each event,
 * and adds one to each event's waiter chain.
 */
struct kfd_event_waiter {
	struct list_head waiters;
	struct task_struct *sleeping_task;

	/* Transitions to true when the event this belongs to is signaled. */
	bool activated;

	/* Event */
	struct kfd_event *event;
	uint32_t input_index;
};
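
/*
 * Illustrative sketch only, not part of the driver: how a thread waiting
 * on several events builds its waiter array. "event_ids" is a hypothetical
 * array of the event IDs being waited on; the real flow is in
 * kfd_wait_on_events() below.
 *
 *	struct kfd_event_waiter *w = alloc_event_waiters(num_events);
 *
 *	for (i = 0; i < num_events; i++)
 *		init_event_waiter(p, &w[i], event_ids[i], i);
 *	// sleep until test_event_condition(all, num_events, w) holds
 *	free_waiters(num_events, w);	// detaches from every waiter chain
 */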

/*
 * Over-complicated pooled allocator for event notification slots.
 *
 * Each signal event needs a 64-bit signal slot where the signaler will write
 * a 1 before sending an interrupt. (This is needed because some interrupts
 * do not contain enough spare data bits to identify an event.)
 * We get whole pages from the kernel page allocator and map them to the
 * process VA.
 * Individual signal events are then allocated a slot in a page.
 */

struct signal_page {
	struct list_head event_pages;	/* kfd_process.signal_event_pages */
	uint64_t *kernel_address;
	uint64_t __user *user_address;
	uint32_t page_index;		/* Index into the mmap aperture. */
	unsigned int free_slots;
	unsigned long used_slot_bitmap[0];
};

#define SLOTS_PER_PAGE KFD_SIGNAL_EVENT_LIMIT
#define SLOT_BITMAP_SIZE BITS_TO_LONGS(SLOTS_PER_PAGE)
#define BITS_PER_PAGE (ilog2(SLOTS_PER_PAGE)+1)
#define SIGNAL_PAGE_SIZE (sizeof(struct signal_page) + \
				SLOT_BITMAP_SIZE * sizeof(long))
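
/*
 * Worked example, a sketch assuming KFD_SIGNAL_EVENT_LIMIT is 256 on a
 * 64-bit kernel (check kfd_priv.h for the real value):
 *
 *	SLOTS_PER_PAGE   = 256 slots of 8 bytes each, so the backing store
 *			   is 2 KiB and get_order(256 * 8) == 0 (one page).
 *	SLOT_BITMAP_SIZE = BITS_TO_LONGS(256) == 4 longs (32 bytes).
 *	SIGNAL_PAGE_SIZE = sizeof(struct signal_page) + 32, i.e. the header
 *			   plus the used_slot_bitmap[] tail.
 */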

/*
 * For signal events, the event ID is used as the interrupt user data.
 * For SQ s_sendmsg interrupts, this is limited to 8 bits.
 */

#define INTERRUPT_DATA_BITS 8
#define SIGNAL_EVENT_ID_SLOT_SHIFT 0

static uint64_t *page_slots(struct signal_page *page)
{
	return page->kernel_address;
}

static bool allocate_free_slot(struct kfd_process *process,
				struct signal_page **out_page,
				unsigned int *out_slot_index)
{
	struct signal_page *page;

	list_for_each_entry(page, &process->signal_event_pages, event_pages) {
		if (page->free_slots > 0) {
			unsigned int slot =
				find_first_zero_bit(page->used_slot_bitmap,
							SLOTS_PER_PAGE);

			__set_bit(slot, page->used_slot_bitmap);
			page->free_slots--;

			page_slots(page)[slot] = UNSIGNALED_EVENT_SLOT;

			*out_page = page;
			*out_slot_index = slot;

			pr_debug("Allocated event signal slot in page %p, slot %d\n",
					page, slot);

			return true;
		}
	}

	pr_debug("No free event signal slots were found for process %p\n",
			process);

	return false;
}

#define list_tail_entry(head, type, member) \
	list_entry((head)->prev, type, member)

static bool allocate_signal_page(struct file *devkfd, struct kfd_process *p)
{
	void *backing_store;
	struct signal_page *page;

	page = kzalloc(SIGNAL_PAGE_SIZE, GFP_KERNEL);
	if (!page)
		goto fail_alloc_signal_page;

	page->free_slots = SLOTS_PER_PAGE;

	backing_store = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
	if (!backing_store)
		goto fail_alloc_signal_store;

	/* Prevent user-mode info leaks. */
	memset(backing_store, (uint8_t) UNSIGNALED_EVENT_SLOT,
		KFD_SIGNAL_EVENT_LIMIT * 8);

	page->kernel_address = backing_store;

	if (list_empty(&p->signal_event_pages))
		page->page_index = 0;
	else
		page->page_index = list_tail_entry(&p->signal_event_pages,
						   struct signal_page,
						   event_pages)->page_index + 1;

	pr_debug("Allocated new event signal page at %p, for process %p\n",
			page, p);
	pr_debug("Page index is %d\n", page->page_index);

	list_add(&page->event_pages, &p->signal_event_pages);

	return true;

fail_alloc_signal_store:
	kfree(page);
fail_alloc_signal_page:
	return false;
}

static bool allocate_event_notification_slot(struct file *devkfd,
					struct kfd_process *p,
					struct signal_page **page,
					unsigned int *signal_slot_index)
{
	bool ret;

	ret = allocate_free_slot(p, page, signal_slot_index);
	if (!ret) {
		ret = allocate_signal_page(devkfd, p);
		if (ret)
			ret = allocate_free_slot(p, page, signal_slot_index);
	}

	return ret;
}

/* Assumes that the process's event_mutex is locked. */
static void release_event_notification_slot(struct signal_page *page,
						size_t slot_index)
{
	__clear_bit(slot_index, page->used_slot_bitmap);
	page->free_slots++;

	/* We don't free signal pages, they are retained by the process
	 * and reused until it exits.
	 */
}

static struct signal_page *lookup_signal_page_by_index(struct kfd_process *p,
						unsigned int page_index)
{
	struct signal_page *page;

	/*
	 * This is safe because we don't delete signal pages until the
	 * process exits.
	 */
	list_for_each_entry(page, &p->signal_event_pages, event_pages)
		if (page->page_index == page_index)
			return page;

	return NULL;
}

/*
 * Assumes that p->event_mutex is held and of course that p is not going
 * away (current or locked).
 */
static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
{
	struct kfd_event *ev;

	hash_for_each_possible(p->events, ev, events, id)
		if (ev->event_id == id)
			return ev;

	return NULL;
}

static u32 make_signal_event_id(struct signal_page *page,
					 unsigned int signal_slot_index)
{
	return page->page_index |
			(signal_slot_index << SIGNAL_EVENT_ID_SLOT_SHIFT);
}
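
/*
 * Illustrative example: with SIGNAL_EVENT_ID_SLOT_SHIFT == 0 the slot index
 * is OR'ed straight into the low bits, so page_index 0 and slot 5 yield
 * event ID 5. The same value is what a signaler places in the interrupt
 * user data, which is why it must fit in INTERRUPT_DATA_BITS for a direct
 * lookup (see kfd_signal_event_interrupt() below).
 */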

/*
 * Produce a kfd event id for a nonsignal event.
 * These are arbitrary numbers, so we do a sequential search through
 * the hash table for an unused number.
 */
static u32 make_nonsignal_event_id(struct kfd_process *p)
{
	u32 id;

	for (id = p->next_nonsignal_event_id;
		id < KFD_LAST_NONSIGNAL_EVENT_ID &&
		lookup_event_by_id(p, id);
		id++)
		;

	if (id < KFD_LAST_NONSIGNAL_EVENT_ID) {

		/*
		 * What if id == LAST_NONSIGNAL_EVENT_ID - 1?
		 * Then next_nonsignal_event_id = LAST_NONSIGNAL_EVENT_ID so
		 * the first loop fails immediately and we proceed with the
		 * wraparound loop below.
		 */
		p->next_nonsignal_event_id = id + 1;

		return id;
	}

	for (id = KFD_FIRST_NONSIGNAL_EVENT_ID;
		id < KFD_LAST_NONSIGNAL_EVENT_ID &&
		lookup_event_by_id(p, id);
		id++)
		;

	if (id < KFD_LAST_NONSIGNAL_EVENT_ID) {
		p->next_nonsignal_event_id = id + 1;
		return id;
	}

	p->next_nonsignal_event_id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	return 0;
}

static struct kfd_event *lookup_event_by_page_slot(struct kfd_process *p,
						struct signal_page *page,
						unsigned int signal_slot)
{
	return lookup_event_by_id(p, make_signal_event_id(page, signal_slot));
}

static int create_signal_event(struct file *devkfd,
				struct kfd_process *p,
				struct kfd_event *ev)
{
	if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) {
		pr_warn("Signal event wasn't created because limit was reached\n");
		return -ENOMEM;
	}

	if (!allocate_event_notification_slot(devkfd, p, &ev->signal_page,
						&ev->signal_slot_index)) {
		pr_warn("Signal event wasn't created because the kernel is out of memory\n");
		return -ENOMEM;
	}

	p->signal_event_count++;

	ev->user_signal_address =
			&ev->signal_page->user_address[ev->signal_slot_index];

	ev->event_id = make_signal_event_id(ev->signal_page,
						ev->signal_slot_index);

	pr_debug("Signal event number %zu created with id %d, address %p\n",
			p->signal_event_count, ev->event_id,
			ev->user_signal_address);

	return 0;
}

/*
 * No non-signal events are supported yet.
 * We create them as events that never signal.
 * Set-event calls from user mode fail.
 */
static int create_other_event(struct kfd_process *p, struct kfd_event *ev)
{
	ev->event_id = make_nonsignal_event_id(p);
	if (ev->event_id == 0)
		return -ENOMEM;

	return 0;
}

void kfd_event_init_process(struct kfd_process *p)
{
	mutex_init(&p->event_mutex);
	hash_init(p->events);
	INIT_LIST_HEAD(&p->signal_event_pages);
	p->next_nonsignal_event_id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	p->signal_event_count = 0;
}

static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
{
	if (ev->signal_page) {
		release_event_notification_slot(ev->signal_page,
						ev->signal_slot_index);
		p->signal_event_count--;
	}

	/*
	 * Abandon the list of waiters. Individual waiting threads will
	 * clean up their own data.
	 */
	list_del(&ev->waiters);

	hash_del(&ev->events);
	kfree(ev);
}

static void destroy_events(struct kfd_process *p)
{
	struct kfd_event *ev;
	struct hlist_node *tmp;
	unsigned int hash_bkt;

	hash_for_each_safe(p->events, hash_bkt, tmp, ev, events)
		destroy_event(p, ev);
}

/*
 * We assume that the process is being destroyed and there is no need to
 * unmap the pages or keep bookkeeping data in order.
 */
static void shutdown_signal_pages(struct kfd_process *p)
{
	struct signal_page *page, *tmp;

	list_for_each_entry_safe(page, tmp, &p->signal_event_pages,
					event_pages) {
		free_pages((unsigned long)page->kernel_address,
				get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
		kfree(page);
	}
}

void kfd_event_free_process(struct kfd_process *p)
{
	destroy_events(p);
	shutdown_signal_pages(p);
}

static bool event_can_be_gpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL ||
					ev->type == KFD_EVENT_TYPE_DEBUG;
}

static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL;
}

int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index)
{
	int ret = 0;
	struct kfd_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;

	ev->type = event_type;
	ev->auto_reset = auto_reset;
	ev->signaled = false;

	INIT_LIST_HEAD(&ev->waiters);

	*event_page_offset = 0;

	mutex_lock(&p->event_mutex);

	switch (event_type) {
	case KFD_EVENT_TYPE_SIGNAL:
	case KFD_EVENT_TYPE_DEBUG:
		ret = create_signal_event(devkfd, p, ev);
		if (!ret) {
			*event_page_offset = (ev->signal_page->page_index |
					KFD_MMAP_EVENTS_MASK);
			*event_page_offset <<= PAGE_SHIFT;
			*event_slot_index = ev->signal_slot_index;
		}
		break;
	default:
		ret = create_other_event(p, ev);
		break;
	}

	if (!ret) {
		hash_add(p->events, &ev->events, ev->event_id);

		*event_id = ev->event_id;
		*event_trigger_data = ev->event_id;
	} else {
		kfree(ev);
	}

	mutex_unlock(&p->event_mutex);

	return ret;
}

/* Assumes that p is current. */
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
{
	struct kfd_event *ev;
	int ret = 0;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev)
		destroy_event(p, ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void set_event(struct kfd_event *ev)
{
	struct kfd_event_waiter *waiter;
	struct kfd_event_waiter *next;

	/* Auto reset if the list is non-empty and we're waking someone. */
	ev->signaled = !ev->auto_reset || list_empty(&ev->waiters);

	list_for_each_entry_safe(waiter, next, &ev->waiters, waiters) {
		waiter->activated = true;

		/* _init because free_waiters will call list_del */
		list_del_init(&waiter->waiters);

		wake_up_process(waiter->sleeping_task);
	}
}
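
/*
 * Behavior sketch, derived from the code above: a manual-reset event stays
 * signaled and wakes every waiter; an auto-reset event with waiters wakes
 * them and ends up unsignaled again; an auto-reset event with no waiters
 * stays signaled so the next wait can consume it.
 */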

/* Assumes that p is current. */
int kfd_set_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		set_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void reset_event(struct kfd_event *ev)
{
	ev->signaled = false;
}

/* Assumes that p is current. */
int kfd_reset_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		reset_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev)
{
	page_slots(ev->signal_page)[ev->signal_slot_index] =
						UNSIGNALED_EVENT_SLOT;
}

static bool is_slot_signaled(struct signal_page *page, unsigned int index)
{
	return page_slots(page)[index] != UNSIGNALED_EVENT_SLOT;
}

static void set_event_from_interrupt(struct kfd_process *p,
					struct kfd_event *ev)
{
	if (ev && event_can_be_gpu_signaled(ev)) {
		acknowledge_signal(p, ev);
		set_event(ev);
	}
}

void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
				uint32_t valid_id_bits)
{
	struct kfd_event *ev;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function returns a locked process.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	mutex_lock(&p->event_mutex);

	if (valid_id_bits >= INTERRUPT_DATA_BITS) {
		/* Partial ID is a full ID. */
		ev = lookup_event_by_id(p, partial_id);
		set_event_from_interrupt(p, ev);
	} else {
		/*
		 * Partial ID is in fact partial. For now we completely
		 * ignore it, but we could use any bits we did receive to
		 * search faster.
		 */
		struct signal_page *page;
		unsigned int i;

		list_for_each_entry(page, &p->signal_event_pages, event_pages)
			for (i = 0; i < SLOTS_PER_PAGE; i++)
				if (is_slot_signaled(page, i)) {
					ev = lookup_event_by_page_slot(p,
								page, i);
					set_event_from_interrupt(p, ev);
				}
	}

	mutex_unlock(&p->event_mutex);
	mutex_unlock(&p->mutex);
}
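
/*
 * Example of the two paths above (a sketch): with INTERRUPT_DATA_BITS == 8,
 * an interrupt payload carrying at least 8 valid bits is treated as the
 * complete event ID and looked up directly; anything smaller degenerates to
 * scanning every allocated slot for a signaled value.
 */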

static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
{
	struct kfd_event_waiter *event_waiters;
	uint32_t i;

	event_waiters = kmalloc_array(num_events,
					sizeof(struct kfd_event_waiter),
					GFP_KERNEL);

	for (i = 0; (event_waiters) && (i < num_events) ; i++) {
		INIT_LIST_HEAD(&event_waiters[i].waiters);
		event_waiters[i].sleeping_task = current;
		event_waiters[i].activated = false;
	}

	return event_waiters;
}

static int init_event_waiter(struct kfd_process *p,
		struct kfd_event_waiter *waiter,
		uint32_t event_id,
		uint32_t input_index)
{
	struct kfd_event *ev = lookup_event_by_id(p, event_id);

	if (!ev)
		return -EINVAL;

	waiter->event = ev;
	waiter->input_index = input_index;
	waiter->activated = ev->signaled;
	ev->signaled = ev->signaled && !ev->auto_reset;

	list_add(&waiter->waiters, &ev->waiters);

	return 0;
}

static bool test_event_condition(bool all, uint32_t num_events,
				struct kfd_event_waiter *event_waiters)
{
	uint32_t i;
	uint32_t activated_count = 0;

	for (i = 0; i < num_events; i++) {
		if (event_waiters[i].activated) {
			if (!all)
				return true;

			activated_count++;
		}
	}

	return activated_count == num_events;
}

/*
 * Copy event-specific data, if defined.
 * Currently only memory exception events have additional data to copy
 * to user.
 */
static bool copy_signaled_event_data(uint32_t num_events,
		struct kfd_event_waiter *event_waiters,
		struct kfd_event_data __user *data)
{
	struct kfd_hsa_memory_exception_data *src;
	struct kfd_hsa_memory_exception_data __user *dst;
	struct kfd_event_waiter *waiter;
	struct kfd_event *event;
	uint32_t i;

	for (i = 0; i < num_events; i++) {
		waiter = &event_waiters[i];
		event = waiter->event;
		if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
			dst = &data[waiter->input_index].memory_exception_data;
			src = &event->memory_exception_data;
			if (copy_to_user(dst, src,
				sizeof(struct kfd_hsa_memory_exception_data)))
				return false;
		}
	}

	return true;
}

static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
{
	if (user_timeout_ms == KFD_EVENT_TIMEOUT_IMMEDIATE)
		return 0;

	if (user_timeout_ms == KFD_EVENT_TIMEOUT_INFINITE)
		return MAX_SCHEDULE_TIMEOUT;

	/*
	 * msecs_to_jiffies interprets all values above 2^31-1 as infinite,
	 * but we consider them finite.
	 * This hack is wrong, but nobody is likely to notice.
	 */
	user_timeout_ms = min_t(uint32_t, user_timeout_ms, 0x7FFFFFFF);

	return msecs_to_jiffies(user_timeout_ms) + 1;
}
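
/*
 * Worked example, assuming the uapi constants KFD_EVENT_TIMEOUT_IMMEDIATE
 * == 0 and KFD_EVENT_TIMEOUT_INFINITE == 0xFFFFFFFF from kfd_ioctl.h:
 * 0 ms polls once without sleeping, 0xFFFFFFFF ms becomes
 * MAX_SCHEDULE_TIMEOUT, and e.g. 1000 ms becomes msecs_to_jiffies(1000) + 1
 * jiffies, the +1 guaranteeing at least the requested wait.
 */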

static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
{
	uint32_t i;

	for (i = 0; i < num_events; i++)
		list_del(&waiters[i].waiters);

	kfree(waiters);
}

int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t user_timeout_ms,
		       enum kfd_event_wait_result *wait_result)
{
	struct kfd_event_data __user *events =
			(struct kfd_event_data __user *) data;
	uint32_t i;
	int ret = 0;
	struct kfd_event_waiter *event_waiters = NULL;
	long timeout = user_timeout_to_jiffies(user_timeout_ms);

	mutex_lock(&p->event_mutex);

	event_waiters = alloc_event_waiters(num_events);
	if (!event_waiters) {
		ret = -ENOMEM;
		goto fail;
	}

	for (i = 0; i < num_events; i++) {
		struct kfd_event_data event_data;

		if (copy_from_user(&event_data, &events[i],
				sizeof(struct kfd_event_data))) {
			ret = -EFAULT;
			goto fail;
		}

		ret = init_event_waiter(p, &event_waiters[i],
				event_data.event_id, i);
		if (ret)
			goto fail;
	}

	mutex_unlock(&p->event_mutex);

	while (true) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (signal_pending(current)) {
			/*
			 * This is wrong when a nonzero, non-infinite timeout
			 * is specified. We need to use
			 * ERESTARTSYS_RESTARTBLOCK, but struct restart_block
			 * contains a union with data for each user and it's
			 * in generic kernel code that I don't want to
			 * touch yet.
			 */
			ret = -ERESTARTSYS;
			break;
		}

		if (test_event_condition(all, num_events, event_waiters)) {
			if (copy_signaled_event_data(num_events,
					event_waiters, events))
				*wait_result = KFD_WAIT_COMPLETE;
			else
				*wait_result = KFD_WAIT_ERROR;
			break;
		}

		if (timeout <= 0) {
			*wait_result = KFD_WAIT_TIMEOUT;
			break;
		}

		timeout = schedule_timeout_interruptible(timeout);
	}
	__set_current_state(TASK_RUNNING);

	mutex_lock(&p->event_mutex);
	free_waiters(num_events, event_waiters);
	mutex_unlock(&p->event_mutex);

	return ret;

fail:
	if (event_waiters)
		free_waiters(num_events, event_waiters);

	mutex_unlock(&p->event_mutex);

	*wait_result = KFD_WAIT_ERROR;

	return ret;
}
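
/*
 * Rough user-space counterpart of kfd_wait_on_events(), a sketch only. It
 * assumes the AMDKFD_IOC_WAIT_EVENTS ioctl and the
 * struct kfd_ioctl_wait_events_args layout from
 * include/uapi/linux/kfd_ioctl.h; verify field names against the installed
 * uapi header. "kfd_fd" and "event_data_array" are placeholders.
 *
 *	struct kfd_ioctl_wait_events_args args = {
 *		.events_ptr = (uintptr_t)event_data_array,
 *		.num_events = n,
 *		.wait_for_all = 1,	// "all" semantics; 0 means "any"
 *		.timeout = 1000,	// milliseconds
 *	};
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &args) == 0)
 *		// args.wait_result mirrors enum kfd_event_wait_result
 *		handle(args.wait_result);
 */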

int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
{
	unsigned int page_index;
	unsigned long pfn;
	struct signal_page *page;

	/* Check that the requested size matches the signal page size. */
	if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) !=
			get_order(vma->vm_end - vma->vm_start)) {
		pr_err("Event page mmap requested illegal size\n");
		return -EINVAL;
	}

	page_index = vma->vm_pgoff;

	page = lookup_signal_page_by_index(p, page_index);
	if (!page) {
		/* Probably KFD bug, but mmap is user-accessible. */
		pr_debug("Signal page could not be found for page_index %u\n",
				page_index);
		return -EINVAL;
	}

	pfn = __pa(page->kernel_address);
	pfn >>= PAGE_SHIFT;

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
		       | VM_DONTDUMP | VM_PFNMAP;

	pr_debug("Mapping signal page\n");
	pr_debug("     start user address  == 0x%08lx\n", vma->vm_start);
	pr_debug("     end user address    == 0x%08lx\n", vma->vm_end);
	pr_debug("     pfn                 == 0x%016lX\n", pfn);
	pr_debug("     vm_flags            == 0x%08lX\n", vma->vm_flags);
	pr_debug("     size                == 0x%08lX\n",
			vma->vm_end - vma->vm_start);

	page->user_address = (uint64_t __user *)vma->vm_start;

	/* Map the page into the user process. */
	return remap_pfn_range(vma, vma->vm_start, pfn,
			vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
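
/*
 * Rough user-space counterpart of kfd_event_mmap(), a sketch only: the
 * event_page_offset returned at event creation is used directly as the mmap
 * offset, after which each event's 64-bit slot can be read, or written by a
 * user-mode signaler. "kfd_fd", "page_size" and "slot_index" are
 * placeholders.
 *
 *	uint64_t *slots = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, kfd_fd, event_page_offset);
 *	uint64_t value = slots[slot_index];	// UNSIGNALED_EVENT_SLOT if idle
 */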

/*
 * Assumes that p->event_mutex is held and of course
 * that p is not going away (current or locked).
 */
static void lookup_events_by_type_and_signal(struct kfd_process *p,
		int type, void *event_data)
{
	struct kfd_hsa_memory_exception_data *ev_data;
	struct kfd_event *ev;
	int bkt;
	bool send_signal = true;

	ev_data = (struct kfd_hsa_memory_exception_data *) event_data;

	hash_for_each(p->events, bkt, ev, events)
		if (ev->type == type) {
			send_signal = false;
			dev_dbg(kfd_device,
					"Event found: id %X type %d",
					ev->event_id, ev->type);
			set_event(ev);
			if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data)
				ev->memory_exception_data = *ev_data;
		}

	/* Send SIGTERM if no event of type "type" has been found. */
	if (send_signal) {
		if (send_sigterm) {
			dev_warn(kfd_device,
				"Sending SIGTERM to HSA Process with PID %d ",
					p->lead_thread->pid);
			send_sig(SIGTERM, p->lead_thread, 0);
		} else {
			dev_err(kfd_device,
				"HSA Process (PID %d) got unhandled exception",
				p->lead_thread->pid);
		}
	}
}

void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
		unsigned long address, bool is_write_requested,
		bool is_execute_requested)
{
	struct kfd_hsa_memory_exception_data memory_exception_data;
	struct vm_area_struct *vma;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function returns a locked process.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	memset(&memory_exception_data, 0, sizeof(memory_exception_data));

	down_read(&p->mm->mmap_sem);
	vma = find_vma(p->mm, address);

	memory_exception_data.gpu_id = dev->id;
	memory_exception_data.va = address;
	/* Set failure reason */
	memory_exception_data.failure.NotPresent = 1;
	memory_exception_data.failure.NoExecute = 0;
	memory_exception_data.failure.ReadOnly = 0;
	if (vma) {
		if (vma->vm_start > address) {
			memory_exception_data.failure.NotPresent = 1;
			memory_exception_data.failure.NoExecute = 0;
			memory_exception_data.failure.ReadOnly = 0;
		} else {
			memory_exception_data.failure.NotPresent = 0;
			if (is_write_requested && !(vma->vm_flags & VM_WRITE))
				memory_exception_data.failure.ReadOnly = 1;
			else
				memory_exception_data.failure.ReadOnly = 0;
			if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
				memory_exception_data.failure.NoExecute = 1;
			else
				memory_exception_data.failure.NoExecute = 0;
		}
	}

	up_read(&p->mm->mmap_sem);

	mutex_lock(&p->event_mutex);

	/* Lookup events by type and signal them */
	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
			&memory_exception_data);

	mutex_unlock(&p->event_mutex);
	mutex_unlock(&p->mutex);
}

void kfd_signal_hw_exception_event(unsigned int pasid)
{
	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function returns a locked process.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	mutex_lock(&p->event_mutex);

	/* Lookup events by type and signal them */
	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL);

	mutex_unlock(&p->event_mutex);
	mutex_unlock(&p->mutex);
}
969