// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/anon_inodes.h>
#include <uapi/linux/kfd_ioctl.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "kfd_priv.h"
#include "kfd_smi_events.h"

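/*
 * One kfd_smi_client is created per open SMI event file descriptor. Event
 * records are queued as text in @fifo and drained by read(); @events is the
 * mask of event types the client has enabled via write(); @lock protects the
 * fifo; @list links the client into kfd_dev::smi_clients under RCU.
 */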
struct kfd_smi_client {
	struct list_head list;
	struct kfifo fifo;
	wait_queue_head_t wait_queue;
	/* events enabled */
	uint64_t events;
	struct kfd_dev *dev;
	spinlock_t lock;
};

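/* Capacity, in bytes, of each client's event fifo */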
#define MAX_KFIFO_SIZE	1024

static __poll_t kfd_smi_ev_poll(struct file *, struct poll_table_struct *);
static ssize_t kfd_smi_ev_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t kfd_smi_ev_write(struct file *, const char __user *, size_t,
				loff_t *);
static int kfd_smi_ev_release(struct inode *, struct file *);

static const char kfd_smi_name[] = "kfd_smi_ev";

static const struct file_operations kfd_smi_ev_fops = {
	.owner = THIS_MODULE,
	.poll = kfd_smi_ev_poll,
	.read = kfd_smi_ev_read,
	.write = kfd_smi_ev_write,
	.release = kfd_smi_ev_release
};

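/* Report the fd as readable whenever the client's fifo holds queued events */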
static __poll_t kfd_smi_ev_poll(struct file *filep,
				struct poll_table_struct *wait)
{
	struct kfd_smi_client *client = filep->private_data;
	__poll_t mask = 0;

	poll_wait(filep, &client->wait_queue, wait);

	spin_lock(&client->lock);
	if (!kfifo_is_empty(&client->fifo))
		mask = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&client->lock);

	return mask;
}

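/*
 * Drain queued event records to user space. Each record was formatted by
 * kfd_smi_event_add() as "<event id in hex> <payload>\n". Reads never block:
 * -EAGAIN is returned when nothing is queued, so callers are expected to
 * poll() for EPOLLIN first.
 */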
static ssize_t kfd_smi_ev_read(struct file *filep, char __user *user,
			       size_t size, loff_t *offset)
{
	int ret;
	size_t to_copy;
	struct kfd_smi_client *client = filep->private_data;
	unsigned char *buf;

	size = min_t(size_t, size, MAX_KFIFO_SIZE);
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* kfifo_to_user() can sleep, so we cannot hold the spinlock around it.
	 * Instead, copy the data out of the kfifo into a local buffer under
	 * the lock, then copy that buffer to user space.
	 */
	spin_lock(&client->lock);
	to_copy = kfifo_len(&client->fifo);
	if (!to_copy) {
		spin_unlock(&client->lock);
		ret = -EAGAIN;
		goto ret_err;
	}
	to_copy = min(size, to_copy);
	ret = kfifo_out(&client->fifo, buf, to_copy);
	spin_unlock(&client->lock);
	if (ret <= 0) {
		ret = -EAGAIN;
		goto ret_err;
	}

	ret = copy_to_user(user, buf, to_copy);
	if (ret) {
		ret = -EFAULT;
		goto ret_err;
	}

	kfree(buf);
	return to_copy;

ret_err:
	kfree(buf);
	return ret;
}

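/*
 * Select which events this client receives. User space writes a 64-bit mask
 * built from KFD_SMI_EVENT_MASK_FROM_INDEX() values; only the first 8 bytes
 * of the write are consumed. A rough userspace sketch (illustrative only):
 *
 *	uint64_t mask = KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_VMFAULT);
 *	write(smi_fd, &mask, sizeof(mask));
 */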
static ssize_t kfd_smi_ev_write(struct file *filep, const char __user *user,
				size_t size, loff_t *offset)
{
	struct kfd_smi_client *client = filep->private_data;
	uint64_t events;

	if (!access_ok(user, size) || size < sizeof(events))
		return -EFAULT;
	if (copy_from_user(&events, user, sizeof(events)))
		return -EFAULT;

	WRITE_ONCE(client->events, events);

	return sizeof(events);
}

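/*
 * Tear down a client when its fd is closed: unlink it from the device's
 * client list under smi_lock, wait for RCU readers in add_event_to_kfifo()
 * to drop their references, then free the fifo and the client.
 */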
static int kfd_smi_ev_release(struct inode *inode, struct file *filep)
{
	struct kfd_smi_client *client = filep->private_data;
	struct kfd_dev *dev = client->dev;

	spin_lock(&dev->smi_lock);
	list_del_rcu(&client->list);
	spin_unlock(&dev->smi_lock);

	synchronize_rcu();
	kfifo_free(&client->fifo);
	kfree(client);

	return 0;
}

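/*
 * Deliver one formatted event record to every registered client that has
 * enabled this event type. If a client's fifo lacks space, the record is
 * dropped for that client and a debug message is logged.
 */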
static void add_event_to_kfifo(struct kfd_dev *dev, unsigned int smi_event,
			      char *event_msg, int len)
{
	struct kfd_smi_client *client;

	rcu_read_lock();

	list_for_each_entry_rcu(client, &dev->smi_clients, list) {
		if (!(READ_ONCE(client->events) &
				KFD_SMI_EVENT_MASK_FROM_INDEX(smi_event)))
			continue;
		spin_lock(&client->lock);
		if (kfifo_avail(&client->fifo) >= len) {
			kfifo_in(&client->fifo, event_msg, len);
			wake_up_all(&client->wait_queue);
		} else {
			pr_debug("smi_event(EventID: %u): no space left\n",
					smi_event);
		}
		spin_unlock(&client->lock);
	}

	rcu_read_unlock();
}

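/*
 * Format an event record as "<event id in hex> <payload>" and broadcast it
 * to all clients. Callers pass a format string ending in '\n' so that
 * records can be separated on the reader side.
 */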
__printf(3, 4)
static void kfd_smi_event_add(struct kfd_dev *dev, unsigned int event,
			      char *fmt, ...)
{
	char fifo_in[KFD_SMI_EVENT_MSG_SIZE];
	int len;
	va_list args;

	if (list_empty(&dev->smi_clients))
		return;

	len = snprintf(fifo_in, sizeof(fifo_in), "%x ", event);

	va_start(args, fmt);
	len += vsnprintf(fifo_in + len, sizeof(fifo_in) - len, fmt, args);
	va_end(args);

	/* Clamp in case vsnprintf() truncated; it returns the untruncated length */
	len = min_t(int, len, sizeof(fifo_in) - 1);

	add_event_to_kfifo(dev, event, fifo_in, len);
}

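/*
 * Emit GPU pre-/post-reset events. The reset sequence number is bumped on
 * the pre-reset notification so that the matching post-reset event reports
 * the same value.
 */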
void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset)
{
	unsigned int event;

	if (post_reset) {
		event = KFD_SMI_EVENT_GPU_POST_RESET;
	} else {
		event = KFD_SMI_EVENT_GPU_PRE_RESET;
		++(dev->reset_seq_num);
	}
	kfd_smi_event_add(dev, event, "%x\n", dev->reset_seq_num);
}

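/*
 * Report the current thermal throttling bitmask together with the counter
 * returned by amdgpu_dpm_get_thermal_throttling_counter(), formatted as
 * "<bitmask>:<counter>".
 */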
void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev,
					     uint64_t throttle_bitmask)
{
	kfd_smi_event_add(dev, KFD_SMI_EVENT_THERMAL_THROTTLE, "%llx:%llx\n",
			  throttle_bitmask,
			  amdgpu_dpm_get_thermal_throttling_counter(dev->adev));
}

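/*
 * Report a GPU VM fault for the process identified by @pasid, formatted as
 * "<pid>:<task name>".
 */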
void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid)
{
	struct amdgpu_task_info task_info;

	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
	amdgpu_vm_get_task_info(dev->adev, pasid, &task_info);
	/* Report VM faults from user applications only; skip faults with no
	 * associated user task, such as retries issued by the kernel.
	 */
	if (!task_info.pid)
		return;

	kfd_smi_event_add(dev, KFD_SMI_EVENT_VMFAULT, "%x:%s\n",
			  task_info.pid, task_info.task_name);
}

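/*
 * Create an SMI event client for @dev and return an anonymous-inode fd for
 * it in @fd. The client starts with no events enabled; user space selects
 * events with write() and consumes them with poll()/read(). Called from the
 * KFD SMI events ioctl handler.
 */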
int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd)
{
	struct kfd_smi_client *client;
	int ret;

	client = kzalloc(sizeof(struct kfd_smi_client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;
	INIT_LIST_HEAD(&client->list);

	ret = kfifo_alloc(&client->fifo, MAX_KFIFO_SIZE, GFP_KERNEL);
	if (ret) {
		kfree(client);
		return ret;
	}

	init_waitqueue_head(&client->wait_queue);
	spin_lock_init(&client->lock);
	client->events = 0;
	client->dev = dev;

	spin_lock(&dev->smi_lock);
	list_add_rcu(&client->list, &dev->smi_clients);
	spin_unlock(&dev->smi_lock);

	ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client,
			       O_RDWR);
	if (ret < 0) {
		spin_lock(&dev->smi_lock);
		list_del_rcu(&client->list);
		spin_unlock(&dev->smi_lock);

		synchronize_rcu();

		kfifo_free(&client->fifo);
		kfree(client);
		return ret;
	}
	*fd = ret;

	return 0;
}