/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/anon_inodes.h>
#include <uapi/linux/kfd_ioctl.h>
#include "amdgpu_vm.h"
#include "kfd_priv.h"
#include "kfd_smi_events.h"

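/*
 * One client per open SMI event file descriptor. Clients hang off
 * kfd_dev.smi_clients (an RCU-protected list); each has its own event
 * fifo, a wait queue for poll()/blocking readers, and a bitmask of the
 * KFD_SMI_EVENT_* types it has subscribed to.
 */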
struct kfd_smi_client {
	struct list_head list;
	struct kfifo fifo;
	wait_queue_head_t wait_queue;
	/* Bitmask of subscribed events (KFD_SMI_EVENT_*) */
	uint64_t events;
	struct kfd_dev *dev;
	spinlock_t lock;
};

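/* Per-client fifo capacity in bytes; events that don't fit are dropped. */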
#define MAX_KFIFO_SIZE	1024

static __poll_t kfd_smi_ev_poll(struct file *, struct poll_table_struct *);
static ssize_t kfd_smi_ev_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t kfd_smi_ev_write(struct file *, const char __user *, size_t,
				loff_t *);
static int kfd_smi_ev_release(struct inode *, struct file *);

static const char kfd_smi_name[] = "kfd_smi_ev";

static const struct file_operations kfd_smi_ev_fops = {
	.owner = THIS_MODULE,
	.poll = kfd_smi_ev_poll,
	.read = kfd_smi_ev_read,
	.write = kfd_smi_ev_write,
	.release = kfd_smi_ev_release
};

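/*
 * Standard poll: readable whenever the client's fifo has queued event
 * text. Writes (event-mask updates) never block, so EPOLLOUT is not
 * reported.
 */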
static __poll_t kfd_smi_ev_poll(struct file *filep,
				struct poll_table_struct *wait)
{
	struct kfd_smi_client *client = filep->private_data;
	__poll_t mask = 0;

	poll_wait(filep, &client->wait_queue, wait);

	spin_lock(&client->lock);
	if (!kfifo_is_empty(&client->fifo))
		mask = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&client->lock);

	return mask;
}

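/*
 * Drain up to MAX_KFIFO_SIZE bytes of queued event text to the caller,
 * or return -EAGAIN if nothing is queued. A minimal userspace sketch of
 * the expected usage (smi_fd is the fd returned by kfd_smi_event_open();
 * error handling omitted):
 *
 *	char buf[1024];
 *	struct pollfd pfd = { .fd = smi_fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);
 *	ssize_t n = read(smi_fd, buf, sizeof(buf));
 */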
static ssize_t kfd_smi_ev_read(struct file *filep, char __user *user,
			       size_t size, loff_t *offset)
{
	int ret;
	size_t to_copy;
	struct kfd_smi_client *client = filep->private_data;
	unsigned char *buf;

	buf = kmalloc(MAX_KFIFO_SIZE * sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* kfifo_to_user can sleep, so we can't call it under the spinlock.
	 * Instead, drain the fifo into a temporary buffer under the lock,
	 * then copy that buffer to the user.
	 */
	spin_lock(&client->lock);
	to_copy = kfifo_len(&client->fifo);
	if (!to_copy) {
		spin_unlock(&client->lock);
		ret = -EAGAIN;
		goto ret_err;
	}
	to_copy = min3(size, (size_t)MAX_KFIFO_SIZE, to_copy);
	ret = kfifo_out(&client->fifo, buf, to_copy);
	spin_unlock(&client->lock);
	if (ret <= 0) {
		ret = -EAGAIN;
		goto ret_err;
	}

	ret = copy_to_user(user, buf, to_copy);
	if (ret) {
		ret = -EFAULT;
		goto ret_err;
	}

	kfree(buf);
	return to_copy;

ret_err:
	kfree(buf);
	return ret;
}

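/*
 * Writing a 64-bit mask replaces the client's set of subscribed events.
 * A minimal userspace sketch, enabling VM-fault events only:
 *
 *	uint64_t mask = KFD_SMI_EVENT_VMFAULT;
 *
 *	write(smi_fd, &mask, sizeof(mask));
 */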
static ssize_t kfd_smi_ev_write(struct file *filep, const char __user *user,
				size_t size, loff_t *offset)
{
	struct kfd_smi_client *client = filep->private_data;
	uint64_t events;

	if (!access_ok(user, size) || size < sizeof(events))
		return -EFAULT;
	if (copy_from_user(&events, user, sizeof(events)))
		return -EFAULT;

	WRITE_ONCE(client->events, events);

	return sizeof(events);
}

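/*
 * Tear down a client when its fd is closed. The RCU grace period
 * guarantees that no kfd_smi_event_update_*() walker still holds a
 * reference by the time the fifo and client are freed.
 */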
static int kfd_smi_ev_release(struct inode *inode, struct file *filep)
{
	struct kfd_smi_client *client = filep->private_data;
	struct kfd_dev *dev = client->dev;

	spin_lock(&dev->smi_lock);
	list_del_rcu(&client->list);
	spin_unlock(&dev->smi_lock);

	synchronize_rcu();
	kfifo_free(&client->fifo);
	kfree(client);

	return 0;
}

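/*
 * Queue a VM-fault event for @pasid on every subscribed client. The
 * event is formatted as text: "<event id> <pid>:<task name>\n". Clients
 * are walked under rcu_read_lock(), so a slow or full client fifo never
 * blocks the reporting path; the event is dropped for that client
 * instead.
 */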
void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd;
	struct amdgpu_task_info task_info;
	/* VmFault msg = (hex)uint32_pid(8) + :(1) + task name(16) = 25
	 * 16 bytes event + 1 byte space + 25 bytes msg + 1 byte \n = 43
	 */
	char fifo_in[43];
	struct kfd_smi_client *client;
	int len;

	if (list_empty(&dev->smi_clients))
		return;

	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
	amdgpu_vm_get_task_info(adev, pasid, &task_info);
	/* Report VM faults from user applications, not retry from kernel */
	if (!task_info.pid)
		return;

	len = snprintf(fifo_in, sizeof(fifo_in), "%x %x:%s\n",
		       KFD_SMI_EVENT_VMFAULT, task_info.pid,
		       task_info.task_name);

	rcu_read_lock();

	list_for_each_entry_rcu(client, &dev->smi_clients, list) {
		if (!(READ_ONCE(client->events) & KFD_SMI_EVENT_VMFAULT))
			continue;
		spin_lock(&client->lock);
		if (kfifo_avail(&client->fifo) >= len) {
			kfifo_in(&client->fifo, fifo_in, len);
			wake_up_all(&client->wait_queue);
		} else {
			pr_debug("smi_event(vmfault): no space left\n");
		}
		spin_unlock(&client->lock);
	}

	rcu_read_unlock();
}

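/*
 * Create a new SMI event client for @dev and return an anonymous fd for
 * it in @fd (reached from the AMDKFD_IOC_SMI_EVENTS ioctl). The client
 * starts with an empty event mask; userspace opts in by writing a
 * KFD_SMI_EVENT_* bitmask to the fd.
 */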
int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd)
{
	struct kfd_smi_client *client;
	int ret;

	client = kzalloc(sizeof(struct kfd_smi_client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;
	INIT_LIST_HEAD(&client->list);

	ret = kfifo_alloc(&client->fifo, MAX_KFIFO_SIZE, GFP_KERNEL);
	if (ret) {
		kfree(client);
		return ret;
	}

	/* Fully initialize the client before installing the fd; the file
	 * is live as soon as anon_inode_getfd() succeeds.
	 */
	init_waitqueue_head(&client->wait_queue);
	spin_lock_init(&client->lock);
	client->events = 0;
	client->dev = dev;

	ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, client,
			       O_RDWR);
	if (ret < 0) {
		kfifo_free(&client->fifo);
		kfree(client);
		return ret;
	}
	*fd = ret;

	spin_lock(&dev->smi_lock);
	list_add_rcu(&client->list, &dev->smi_clients);
	spin_unlock(&dev->smi_lock);

	return 0;
}