/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/memory.h>
#include "kfd_priv.h"
#include "kfd_events.h"
#include <linux/device.h>

/*
 * Wrapper around wait_queue_entry_t
 */
struct kfd_event_waiter {
	wait_queue_entry_t wait;
	struct kfd_event *event; /* Event to wait for */
	bool activated;		 /* Becomes true when event is signaled */
};
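
/*
 * The waiter array is allocated by kfd_wait_on_events, one entry per event
 * being waited on. set_event() marks matching waiters as activated and wakes
 * them; destroy_event() clears waiter->event so that sleeping waiters can
 * tell the event went away and fail the wait.
 */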

/*
 * Each signal event needs a 64-bit signal slot where the signaler will write
 * a 1 before sending an interrupt. (This is needed because some interrupts
 * do not contain enough spare data bits to identify an event.)
 * We get whole pages and map them to the process VA.
 * Individual signal events use their event_id as slot index.
 */
struct kfd_signal_page {
	uint64_t *kernel_address;
	uint64_t __user *user_address;
};
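
/*
 * The signal page has one kernel mapping (kernel_address) and one user
 * mapping (user_address, set up in kfd_event_mmap). For a signal event with
 * ID n, the kernel reads and resets slot kernel_address[n], while user mode
 * signals through user_address[n]; both refer to the same 64-bit slot.
 */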

static uint64_t *page_slots(struct kfd_signal_page *page)
{
	return page->kernel_address;
}

static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p)
{
	void *backing_store;
	struct kfd_signal_page *page;

	page = kzalloc(sizeof(*page), GFP_KERNEL);
	if (!page)
		return NULL;

	backing_store = (void *) __get_free_pages(GFP_KERNEL,
					get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
	if (!backing_store)
		goto fail_alloc_signal_store;

	/* Initialize all events to unsignaled */
	memset(backing_store, (uint8_t) UNSIGNALED_EVENT_SLOT,
	       KFD_SIGNAL_EVENT_LIMIT * 8);

	page->kernel_address = backing_store;
	pr_debug("Allocated new event signal page at %p, for process %p\n",
			page, p);

	return page;

fail_alloc_signal_store:
	kfree(page);
	return NULL;
}

static int allocate_event_notification_slot(struct kfd_process *p,
					    struct kfd_event *ev)
{
	int id;

	if (!p->signal_page) {
		p->signal_page = allocate_signal_page(p);
		if (!p->signal_page)
			return -ENOMEM;
		/* Oldest user mode expects 256 event slots */
		p->signal_mapped_size = 256*8;
	}

	/*
	 * Compatibility with old user mode: Only use signal slots
	 * user mode has mapped, may be less than
	 * KFD_SIGNAL_EVENT_LIMIT. This also allows future increase
	 * of the event limit without breaking user mode.
	 */
	id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8,
		       GFP_KERNEL);
	if (id < 0)
		return id;

	ev->event_id = id;
	page_slots(p->signal_page)[id] = UNSIGNALED_EVENT_SLOT;

	return 0;
}

/*
 * Assumes that p->event_mutex is held and of course that p is not going
 * away (current or locked).
 */
static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
{
	return idr_find(&p->event_idr, id);
}
/**
 * lookup_signaled_event_by_partial_id - Lookup signaled event from partial ID
 * @p:     Pointer to struct kfd_process
 * @id:    ID to look up
 * @bits:  Number of valid bits in @id
 *
 * Finds the first signaled event with a matching partial ID. If no
 * matching signaled event is found, returns NULL. In that case the
 * caller should assume that the partial ID is invalid and do an
 * exhaustive search of all signaled events.
 *
 * If multiple events with the same partial ID signal at the same
 * time, they will be found one interrupt at a time, not necessarily
 * in the same order the interrupts occurred. As long as the number of
 * interrupts is correct, all signaled events will be seen by the
 * driver.
 */
static struct kfd_event *lookup_signaled_event_by_partial_id(
	struct kfd_process *p, uint32_t id, uint32_t bits)
{
	struct kfd_event *ev;

	if (!p->signal_page || id >= KFD_SIGNAL_EVENT_LIMIT)
		return NULL;

	/* Fast path for the common case that @id is not a partial ID
	 * and we only need a single lookup.
	 */
	if (bits > 31 || (1U << bits) >= KFD_SIGNAL_EVENT_LIMIT) {
		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
			return NULL;

		return idr_find(&p->event_idr, id);
	}

	/* General case for partial IDs: Iterate over all matching IDs
	 * and find the first one that has signaled.
	 */
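	/* For example (illustration only): with bits == 10 the candidates are
	 * id, id + 1024, id + 2048, ... below KFD_SIGNAL_EVENT_LIMIT, i.e.
	 * every event ID whose low 10 bits match the partial ID from the
	 * interrupt payload.
	 */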
	for (ev = NULL; id < KFD_SIGNAL_EVENT_LIMIT && !ev; id += 1U << bits) {
		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
			continue;

		ev = idr_find(&p->event_idr, id);
	}

	return ev;
}

static int create_signal_event(struct file *devkfd,
				struct kfd_process *p,
				struct kfd_event *ev)
{
	int ret;

	if (p->signal_mapped_size &&
	    p->signal_event_count == p->signal_mapped_size / 8) {
		if (!p->signal_event_limit_reached) {
			pr_warn("Signal event wasn't created because limit was reached\n");
			p->signal_event_limit_reached = true;
		}
		return -ENOSPC;
	}

	ret = allocate_event_notification_slot(p, ev);
	if (ret) {
		pr_warn("Signal event wasn't created because out of kernel memory\n");
		return ret;
	}

	p->signal_event_count++;

	ev->user_signal_address = &p->signal_page->user_address[ev->event_id];
	pr_debug("Signal event number %zu created with id %d, address %p\n",
			p->signal_event_count, ev->event_id,
			ev->user_signal_address);

	return 0;
}

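/*
 * Event ID ranges: signal and debug events get IDs below
 * p->signal_mapped_size / 8, so the ID can double as the signal slot index.
 * All other event types are allocated from KFD_FIRST_NONSIGNAL_EVENT_ID
 * upwards, a separate range that does not overlap the signal slots.
 */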
static int create_other_event(struct kfd_process *p, struct kfd_event *ev)
{
	/* Cast KFD_LAST_NONSIGNAL_EVENT_ID to uint32_t. This allows an
	 * intentional integer overflow to -1 without a compiler
	 * warning. idr_alloc treats a negative value as "maximum
	 * signed integer".
	 */
	int id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID,
			   (uint32_t)KFD_LAST_NONSIGNAL_EVENT_ID + 1,
			   GFP_KERNEL);

	if (id < 0)
		return id;
	ev->event_id = id;

	return 0;
}

void kfd_event_init_process(struct kfd_process *p)
{
	mutex_init(&p->event_mutex);
	idr_init(&p->event_idr);
	p->signal_page = NULL;
	p->signal_event_count = 0;
}

static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
{
	struct kfd_event_waiter *waiter;

	/* Wake up pending waiters. They will return failure */
	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
		waiter->event = NULL;
	wake_up_all(&ev->wq);

	if (ev->type == KFD_EVENT_TYPE_SIGNAL ||
	    ev->type == KFD_EVENT_TYPE_DEBUG)
		p->signal_event_count--;

	idr_remove(&p->event_idr, ev->event_id);
	kfree(ev);
}

static void destroy_events(struct kfd_process *p)
{
	struct kfd_event *ev;
	uint32_t id;

	idr_for_each_entry(&p->event_idr, ev, id)
		destroy_event(p, ev);
	idr_destroy(&p->event_idr);
}

/*
 * We assume that the process is being destroyed and there is no need to
 * unmap the pages or keep bookkeeping data in order.
 */
static void shutdown_signal_page(struct kfd_process *p)
{
	struct kfd_signal_page *page = p->signal_page;

	if (page) {
		free_pages((unsigned long)page->kernel_address,
				get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
		kfree(page);
	}
}

void kfd_event_free_process(struct kfd_process *p)
{
	destroy_events(p);
	shutdown_signal_page(p);
}

static bool event_can_be_gpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL ||
					ev->type == KFD_EVENT_TYPE_DEBUG;
}

static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL;
}

int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index)
{
	int ret = 0;
	struct kfd_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;

	ev->type = event_type;
	ev->auto_reset = auto_reset;
	ev->signaled = false;

	init_waitqueue_head(&ev->wq);

	*event_page_offset = 0;

	mutex_lock(&p->event_mutex);

	switch (event_type) {
	case KFD_EVENT_TYPE_SIGNAL:
	case KFD_EVENT_TYPE_DEBUG:
		ret = create_signal_event(devkfd, p, ev);
		if (!ret) {
			*event_page_offset = KFD_MMAP_EVENTS_MASK;
			*event_page_offset <<= PAGE_SHIFT;
			*event_slot_index = ev->event_id;
		}
		break;
	default:
		ret = create_other_event(p, ev);
		break;
	}

	if (!ret) {
		*event_id = ev->event_id;
		*event_trigger_data = ev->event_id;
	} else {
		kfree(ev);
	}

	mutex_unlock(&p->event_mutex);

	return ret;
}

/* Assumes that p is current. */
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
{
	struct kfd_event *ev;
	int ret = 0;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev)
		destroy_event(p, ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void set_event(struct kfd_event *ev)
{
	struct kfd_event_waiter *waiter;

	/* Auto reset if the list is non-empty and we're waking
	 * someone. waitqueue_active is safe here because we're
	 * protected by the p->event_mutex, which is also held when
	 * updating the wait queues in kfd_wait_on_events.
	 */
	ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);

	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
		waiter->activated = true;

	wake_up_all(&ev->wq);
}

/* Assumes that p is current. */
int kfd_set_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		set_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void reset_event(struct kfd_event *ev)
{
	ev->signaled = false;
}

/* Assumes that p is current. */
int kfd_reset_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		reset_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev)
{
	page_slots(p->signal_page)[ev->event_id] = UNSIGNALED_EVENT_SLOT;
}

static void set_event_from_interrupt(struct kfd_process *p,
					struct kfd_event *ev)
{
	if (ev && event_can_be_gpu_signaled(ev)) {
		acknowledge_signal(p, ev);
		set_event(ev);
	}
}

void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
				uint32_t valid_id_bits)
{
	struct kfd_event *ev = NULL;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	mutex_lock(&p->event_mutex);

	if (valid_id_bits)
		ev = lookup_signaled_event_by_partial_id(p, partial_id,
							 valid_id_bits);
	if (ev) {
		set_event_from_interrupt(p, ev);
	} else if (p->signal_page) {
		/*
		 * Partial ID lookup failed. Assume that the event ID
		 * in the interrupt payload was invalid and do an
		 * exhaustive search of signaled events.
		 */
		uint64_t *slots = page_slots(p->signal_page);
		uint32_t id;

		if (valid_id_bits)
			pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
					     partial_id, valid_id_bits);

		if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT/2) {
			/* With relatively few events, it's faster to
			 * iterate over the event IDR
			 */
			idr_for_each_entry(&p->event_idr, ev, id) {
				if (id >= KFD_SIGNAL_EVENT_LIMIT)
					break;

				if (slots[id] != UNSIGNALED_EVENT_SLOT)
					set_event_from_interrupt(p, ev);
			}
		} else {
			/* With relatively many events, it's faster to
			 * iterate over the signal slots and lookup
			 * only signaled events from the IDR.
			 */
			for (id = 0; id < KFD_SIGNAL_EVENT_LIMIT; id++)
				if (slots[id] != UNSIGNALED_EVENT_SLOT) {
					ev = lookup_event_by_id(p, id);
					set_event_from_interrupt(p, ev);
				}
		}
	}

	mutex_unlock(&p->event_mutex);
	kfd_unref_process(p);
}

static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
{
	struct kfd_event_waiter *event_waiters;
	uint32_t i;

	event_waiters = kmalloc_array(num_events,
					sizeof(struct kfd_event_waiter),
					GFP_KERNEL);

	for (i = 0; (event_waiters) && (i < num_events); i++) {
		init_wait(&event_waiters[i].wait);
		event_waiters[i].activated = false;
	}

	return event_waiters;
}

static int init_event_waiter_get_status(struct kfd_process *p,
		struct kfd_event_waiter *waiter,
		uint32_t event_id)
{
	struct kfd_event *ev = lookup_event_by_id(p, event_id);

	if (!ev)
		return -EINVAL;

	waiter->event = ev;
	waiter->activated = ev->signaled;
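	/* An auto-reset event is consumed by the first waiter: the waiter is
	 * marked activated here, and ev->signaled is cleared again below so
	 * that later waiters must wait for a new signal.
	 */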
	ev->signaled = ev->signaled && !ev->auto_reset;

	return 0;
}

static void init_event_waiter_add_to_waitlist(struct kfd_event_waiter *waiter)
{
	struct kfd_event *ev = waiter->event;

	/* Only add to the wait list if we actually need to
	 * wait on this event.
	 */
	if (!waiter->activated)
		add_wait_queue(&ev->wq, &waiter->wait);
}

/* test_event_condition - Test condition of events being waited for
 * @all:           Return completion only if all events have signaled
 * @num_events:    Number of events to wait for
 * @event_waiters: Array of event waiters, one per event
 *
 * Returns KFD_IOC_WAIT_RESULT_COMPLETE if all (or one) event(s) have
 * signaled. Returns KFD_IOC_WAIT_RESULT_TIMEOUT if no (or not all)
 * events have signaled. Returns KFD_IOC_WAIT_RESULT_FAIL if any of
 * the events have been destroyed.
 */
static uint32_t test_event_condition(bool all, uint32_t num_events,
				struct kfd_event_waiter *event_waiters)
{
	uint32_t i;
	uint32_t activated_count = 0;

	for (i = 0; i < num_events; i++) {
		if (!event_waiters[i].event)
			return KFD_IOC_WAIT_RESULT_FAIL;

		if (event_waiters[i].activated) {
			if (!all)
				return KFD_IOC_WAIT_RESULT_COMPLETE;

			activated_count++;
		}
	}

	return activated_count == num_events ?
		KFD_IOC_WAIT_RESULT_COMPLETE : KFD_IOC_WAIT_RESULT_TIMEOUT;
}

/*
 * Copy event specific data, if defined.
 * Currently only memory exception events have additional data to copy to user
 */
static int copy_signaled_event_data(uint32_t num_events,
		struct kfd_event_waiter *event_waiters,
		struct kfd_event_data __user *data)
{
	struct kfd_hsa_memory_exception_data *src;
	struct kfd_hsa_memory_exception_data __user *dst;
	struct kfd_event_waiter *waiter;
	struct kfd_event *event;
	uint32_t i;

	for (i = 0; i < num_events; i++) {
		waiter = &event_waiters[i];
		event = waiter->event;
		if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
			dst = &data[i].memory_exception_data;
			src = &event->memory_exception_data;
			if (copy_to_user(dst, src,
				sizeof(struct kfd_hsa_memory_exception_data)))
				return -EFAULT;
		}
	}

	return 0;
}

static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
{
	if (user_timeout_ms == KFD_EVENT_TIMEOUT_IMMEDIATE)
		return 0;

	if (user_timeout_ms == KFD_EVENT_TIMEOUT_INFINITE)
		return MAX_SCHEDULE_TIMEOUT;

	/*
	 * msecs_to_jiffies interprets all values above 2^31-1 as infinite,
	 * but we consider them finite.
	 * This hack is wrong, but nobody is likely to notice.
	 */
	user_timeout_ms = min_t(uint32_t, user_timeout_ms, 0x7FFFFFFF);

	return msecs_to_jiffies(user_timeout_ms) + 1;
}
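
/*
 * Example (illustration, assuming HZ == 1000): a 500 ms user timeout becomes
 * msecs_to_jiffies(500) + 1 == 501 jiffies. The extra jiffy presumably makes
 * sure the wait is never shorter than requested, since the current jiffy may
 * already be partially elapsed when the wait starts.
 */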

static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
{
	uint32_t i;

	for (i = 0; i < num_events; i++)
		if (waiters[i].event)
			remove_wait_queue(&waiters[i].event->wq,
					  &waiters[i].wait);

	kfree(waiters);
}

int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t user_timeout_ms,
		       uint32_t *wait_result)
{
	struct kfd_event_data __user *events =
			(struct kfd_event_data __user *) data;
	uint32_t i;
	int ret = 0;

	struct kfd_event_waiter *event_waiters = NULL;
	long timeout = user_timeout_to_jiffies(user_timeout_ms);

	event_waiters = alloc_event_waiters(num_events);
	if (!event_waiters) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&p->event_mutex);

	for (i = 0; i < num_events; i++) {
		struct kfd_event_data event_data;

		if (copy_from_user(&event_data, &events[i],
				sizeof(struct kfd_event_data))) {
			ret = -EFAULT;
			goto out_unlock;
		}

		ret = init_event_waiter_get_status(p, &event_waiters[i],
				event_data.event_id);
		if (ret)
			goto out_unlock;
	}

	/* Check condition once. */
	*wait_result = test_event_condition(all, num_events, event_waiters);
	if (*wait_result == KFD_IOC_WAIT_RESULT_COMPLETE) {
		ret = copy_signaled_event_data(num_events,
					       event_waiters, events);
		goto out_unlock;
	} else if (WARN_ON(*wait_result == KFD_IOC_WAIT_RESULT_FAIL)) {
		/* This should not happen. Events shouldn't be
		 * destroyed while we're holding the event_mutex
		 */
		goto out_unlock;
	}

	/* Add to wait lists if we need to wait. */
	for (i = 0; i < num_events; i++)
		init_event_waiter_add_to_waitlist(&event_waiters[i]);

	mutex_unlock(&p->event_mutex);

	while (true) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (signal_pending(current)) {
			/*
			 * This is wrong when a nonzero, non-infinite timeout
			 * is specified. We need to use
			 * ERESTARTSYS_RESTARTBLOCK, but struct restart_block
			 * contains a union with data for each user and it's
			 * in generic kernel code that I don't want to
			 * touch yet.
			 */
			ret = -ERESTARTSYS;
			break;
		}

		/* Set task state to interruptible sleep before
		 * checking wake-up conditions. A concurrent wake-up
		 * will put the task back into runnable state. In that
		 * case schedule_timeout will not put the task to
		 * sleep and we'll get a chance to re-check the
		 * updated conditions almost immediately. Otherwise,
		 * this race condition would lead to a soft hang or a
		 * very long sleep.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		*wait_result = test_event_condition(all, num_events,
						    event_waiters);
		if (*wait_result != KFD_IOC_WAIT_RESULT_TIMEOUT)
			break;

		if (timeout <= 0)
			break;

		timeout = schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	/* copy_signaled_event_data may sleep. So this has to happen
	 * after the task state is set back to RUNNING.
	 */
	if (!ret && *wait_result == KFD_IOC_WAIT_RESULT_COMPLETE)
		ret = copy_signaled_event_data(num_events,
					       event_waiters, events);

	mutex_lock(&p->event_mutex);
out_unlock:
	free_waiters(num_events, event_waiters);
	mutex_unlock(&p->event_mutex);
out:
	if (ret)
		*wait_result = KFD_IOC_WAIT_RESULT_FAIL;
	else if (*wait_result == KFD_IOC_WAIT_RESULT_FAIL)
		ret = -EIO;

	return ret;
}

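/*
 * Maps the process's signal page into user space. The offset that
 * kfd_event_create returns in *event_page_offset (KFD_MMAP_EVENTS_MASK
 * shifted by PAGE_SHIFT) is intended to be passed as the mmap offset that
 * ends up here. The mapped size, stored in p->signal_mapped_size, limits how
 * many signal event slots create_signal_event may hand out.
 */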
int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
{
	unsigned long pfn;
	struct kfd_signal_page *page;
	int ret;

	/* check required size doesn't exceed the allocated size */
	if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) <
			get_order(vma->vm_end - vma->vm_start)) {
		pr_err("Event page mmap requested illegal size\n");
		return -EINVAL;
	}

	page = p->signal_page;
	if (!page) {
		/* Probably KFD bug, but mmap is user-accessible. */
		pr_debug("Signal page could not be found\n");
		return -EINVAL;
	}

	pfn = __pa(page->kernel_address);
	pfn >>= PAGE_SHIFT;

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
		       | VM_DONTDUMP | VM_PFNMAP;

	pr_debug("Mapping signal page\n");
	pr_debug("     start user address  == 0x%08lx\n", vma->vm_start);
	pr_debug("     end user address    == 0x%08lx\n", vma->vm_end);
	pr_debug("     pfn                 == 0x%016lX\n", pfn);
	pr_debug("     vm_flags            == 0x%08lX\n", vma->vm_flags);
	pr_debug("     size                == 0x%08lX\n",
			vma->vm_end - vma->vm_start);

	page->user_address = (uint64_t __user *)vma->vm_start;

	/* mapping the page to user process */
	ret = remap_pfn_range(vma, vma->vm_start, pfn,
			vma->vm_end - vma->vm_start, vma->vm_page_prot);
	if (!ret)
		p->signal_mapped_size = vma->vm_end - vma->vm_start;

	return ret;
}

/*
 * Assumes that p->event_mutex is held and of course
 * that p is not going away (current or locked).
 */
static void lookup_events_by_type_and_signal(struct kfd_process *p,
		int type, void *event_data)
{
	struct kfd_hsa_memory_exception_data *ev_data;
	struct kfd_event *ev;
	uint32_t id;
	bool send_signal = true;

	ev_data = (struct kfd_hsa_memory_exception_data *) event_data;

	id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	idr_for_each_entry_continue(&p->event_idr, ev, id)
		if (ev->type == type) {
			send_signal = false;
			dev_dbg(kfd_device,
					"Event found: id %X type %d",
					ev->event_id, ev->type);
			set_event(ev);
			if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data)
				ev->memory_exception_data = *ev_data;
		}

	/* Send SIGTERM if no event of type "type" has been found */
	if (send_signal) {
		if (send_sigterm) {
			dev_warn(kfd_device,
				"Sending SIGTERM to HSA Process with PID %d ",
					p->lead_thread->pid);
			send_sig(SIGTERM, p->lead_thread, 0);
		} else {
			dev_err(kfd_device,
				"HSA Process (PID %d) got unhandled exception",
				p->lead_thread->pid);
		}
	}
}

void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
		unsigned long address, bool is_write_requested,
		bool is_execute_requested)
{
	struct kfd_hsa_memory_exception_data memory_exception_data;
	struct vm_area_struct *vma;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	struct mm_struct *mm;

	if (!p)
		return; /* Presumably process exited. */

	/* Take a safe reference to the mm_struct, which may otherwise
	 * disappear even while the kfd_process is still referenced.
	 */
	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		kfd_unref_process(p);
		return; /* Process is exiting */
	}

	memset(&memory_exception_data, 0, sizeof(memory_exception_data));

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);

	memory_exception_data.gpu_id = dev->id;
	memory_exception_data.va = address;
	/* Set failure reason */
	memory_exception_data.failure.NotPresent = 1;
	memory_exception_data.failure.NoExecute = 0;
	memory_exception_data.failure.ReadOnly = 0;
	if (vma) {
		if (vma->vm_start > address) {
			memory_exception_data.failure.NotPresent = 1;
			memory_exception_data.failure.NoExecute = 0;
			memory_exception_data.failure.ReadOnly = 0;
		} else {
			memory_exception_data.failure.NotPresent = 0;
			if (is_write_requested && !(vma->vm_flags & VM_WRITE))
				memory_exception_data.failure.ReadOnly = 1;
			else
				memory_exception_data.failure.ReadOnly = 0;
			if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
				memory_exception_data.failure.NoExecute = 1;
			else
				memory_exception_data.failure.NoExecute = 0;
		}
	}

	up_read(&mm->mmap_sem);
	mmput(mm);

	mutex_lock(&p->event_mutex);

	/* Lookup events by type and signal them */
	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
			&memory_exception_data);

	mutex_unlock(&p->event_mutex);
	kfd_unref_process(p);
}

void kfd_signal_hw_exception_event(unsigned int pasid)
{
	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	mutex_lock(&p->event_mutex);

	/* Lookup events by type and signal them */
	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL);

	mutex_unlock(&p->event_mutex);
	kfd_unref_process(p);
}