// SPDX-License-Identifier: GPL-2.0-only
/*
 * OPAL Runtime Diagnostics interface driver
 * Supported on POWERNV platform
 *
 * Copyright IBM Corporation 2015
 */

#define pr_fmt(fmt) "opal-prd: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/poll.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/opal-prd.h>
#include <asm/opal.h>
#include <asm/io.h>
#include <linux/uaccess.h>

/*
 * The msg member must be at the end of the struct, as it's followed by the
 * message data.
 */
struct opal_prd_msg_queue_item {
	struct list_head		list;
	struct opal_prd_msg_header	msg;
};

static struct device_node *prd_node;
static LIST_HEAD(opal_prd_msg_queue);
static DEFINE_SPINLOCK(opal_prd_msg_queue_lock);
static DECLARE_WAIT_QUEUE_HEAD(opal_prd_msg_wait);
static atomic_t prd_usage;

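/*
 * Check that a user-supplied physical range lies entirely within one of the
 * firmware-reserved regions carrying an "ibm,prd-label" property under
 * /reserved-memory; only those regions may be mmap()ed by the PRD daemon.
 */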
static bool opal_prd_range_is_valid(uint64_t addr, uint64_t size)
{
	struct device_node *parent, *node;
	bool found;

	if (addr + size < addr)
		return false;

	parent = of_find_node_by_path("/reserved-memory");
	if (!parent)
		return false;

	found = false;

	for_each_child_of_node(parent, node) {
		uint64_t range_addr, range_size, range_end;
		const __be32 *addrp;
		const char *label;

		addrp = of_get_address(node, 0, &range_size, NULL);
		if (!addrp)
			continue;

		range_addr = of_read_number(addrp, 2);
		range_end = range_addr + range_size;

		label = of_get_property(node, "ibm,prd-label", NULL);

		/* PRD ranges need a label */
		if (!label)
			continue;

		if (range_end <= range_addr)
			continue;

		if (addr >= range_addr && addr + size <= range_end) {
			found = true;
			of_node_put(node);
			break;
		}
	}

	of_node_put(parent);
	return found;
}

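/*
 * Allow a single opener at a time: the prd_usage flag is claimed here and
 * released again in opal_prd_release().
 */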
static int opal_prd_open(struct inode *inode, struct file *file)
{
	/*
	 * Prevent multiple (separate) processes from concurrent interactions
	 * with the FW PRD channel
	 */
	if (atomic_xchg(&prd_usage, 1) == 1)
		return -EBUSY;

	return 0;
}

/*
 * opal_prd_mmap - maps firmware-provided ranges into userspace
 * @file: file structure for the device
 * @vma: VMA to map the registers into
 */
static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t addr, size;
	pgprot_t page_prot;
	int rc;

	pr_devel("opal_prd_mmap(0x%016lx, 0x%016lx, 0x%lx, 0x%lx)\n",
			vma->vm_start, vma->vm_end, vma->vm_pgoff,
			vma->vm_flags);

	addr = vma->vm_pgoff << PAGE_SHIFT;
	size = vma->vm_end - vma->vm_start;

	/* ensure we're mapping within one of the allowable ranges */
	if (!opal_prd_range_is_valid(addr, size))
		return -EINVAL;

	page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
					 size, vma->vm_page_prot);

	rc = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size,
				page_prot);

	return rc;
}

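/* Check the message queue under the lock; used by poll and blocking reads. */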
static bool opal_msg_queue_empty(void)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
	ret = list_empty(&opal_prd_msg_queue);
	spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);

	return ret;
}

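/*
 * Report the device readable once at least one firmware message has been
 * queued by the message notifier below.
 */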
static __poll_t opal_prd_poll(struct file *file,
		struct poll_table_struct *wait)
{
	poll_wait(file, &opal_prd_msg_wait, wait);

	if (!opal_msg_queue_empty())
		return EPOLLIN | EPOLLRDNORM;

	return 0;
}

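/*
 * Hand the oldest queued firmware message to userspace. Reads block (unless
 * O_NONBLOCK) until a message arrives; a message that does not fit in, or
 * cannot be copied to, the user buffer is re-queued at the head of the list.
 */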
static ssize_t opal_prd_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct opal_prd_msg_queue_item *item;
	unsigned long flags;
	ssize_t size, err;
	int rc;

	/* we need at least a header's worth of data */
	if (count < sizeof(item->msg))
		return -EINVAL;

	if (*ppos)
		return -ESPIPE;

	item = NULL;

	for (;;) {

		spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
		if (!list_empty(&opal_prd_msg_queue)) {
			item = list_first_entry(&opal_prd_msg_queue,
					struct opal_prd_msg_queue_item, list);
			list_del(&item->list);
		}
		spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);

		if (item)
			break;

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		rc = wait_event_interruptible(opal_prd_msg_wait,
				!opal_msg_queue_empty());
		if (rc)
			return -EINTR;
	}

	size = be16_to_cpu(item->msg.size);
	if (size > count) {
		err = -EINVAL;
		goto err_requeue;
	}

	rc = copy_to_user(buf, &item->msg, size);
	if (rc) {
		err = -EFAULT;
		goto err_requeue;
	}

	kfree(item);

	return size;

err_requeue:
	/* eep! re-queue at the head of the list */
	spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
	list_add(&item->list, &opal_prd_msg_queue);
	spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);
	return err;
}

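/*
 * Pass a message from the PRD daemon to firmware. The length is taken from
 * the 'size' field of the header at the start of the user buffer.
 */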
static ssize_t opal_prd_write(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct opal_prd_msg_header hdr;
	ssize_t size;
	void *msg;
	int rc;

	size = sizeof(hdr);

	if (count < size)
		return -EINVAL;

	/* grab the header */
	rc = copy_from_user(&hdr, buf, sizeof(hdr));
	if (rc)
		return -EFAULT;

	size = be16_to_cpu(hdr.size);

	msg = memdup_user(buf, size);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	rc = opal_prd_msg(msg);
	if (rc) {
		pr_warn("write: opal_prd_msg returned %d\n", rc);
		size = -EIO;
	}

	kfree(msg);

	return size;
}

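/*
 * Tell firmware the daemon has gone away (FINI), then drop the single-user
 * claim taken in opal_prd_open().
 */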
static int opal_prd_release(struct inode *inode, struct file *file)
{
	struct opal_prd_msg_header msg;

	msg.size = cpu_to_be16(sizeof(msg));
	msg.type = OPAL_PRD_MSG_TYPE_FINI;

	opal_prd_msg((struct opal_prd_msg *)&msg);

	atomic_xchg(&prd_usage, 0);

	return 0;
}

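/*
 * ioctl interface: version query plus SCOM read/write passthrough. The
 * OPAL return code is reported in scom.rc rather than the ioctl result.
 */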
static long opal_prd_ioctl(struct file *file, unsigned int cmd,
		unsigned long param)
{
	struct opal_prd_info info;
	struct opal_prd_scom scom;
	int rc = 0;

	switch (cmd) {
	case OPAL_PRD_GET_INFO:
		memset(&info, 0, sizeof(info));
		info.version = OPAL_PRD_KERNEL_VERSION;
		rc = copy_to_user((void __user *)param, &info, sizeof(info));
		if (rc)
			return -EFAULT;
		break;

	case OPAL_PRD_SCOM_READ:
		rc = copy_from_user(&scom, (void __user *)param, sizeof(scom));
		if (rc)
			return -EFAULT;

		scom.rc = opal_xscom_read(scom.chip, scom.addr,
				(__be64 *)&scom.data);
		scom.data = be64_to_cpu(scom.data);
		pr_devel("ioctl SCOM_READ: chip %llx addr %016llx data %016llx rc %lld\n",
				scom.chip, scom.addr, scom.data, scom.rc);

		rc = copy_to_user((void __user *)param, &scom, sizeof(scom));
		if (rc)
			return -EFAULT;
		break;

	case OPAL_PRD_SCOM_WRITE:
		rc = copy_from_user(&scom, (void __user *)param, sizeof(scom));
		if (rc)
			return -EFAULT;

		scom.rc = opal_xscom_write(scom.chip, scom.addr, scom.data);
		pr_devel("ioctl SCOM_WRITE: chip %llx addr %016llx data %016llx rc %lld\n",
				scom.chip, scom.addr, scom.data, scom.rc);

		rc = copy_to_user((void __user *)param, &scom, sizeof(scom));
		if (rc)
			return -EFAULT;
		break;

	default:
		rc = -EINVAL;
	}

	return rc;
}

static const struct file_operations opal_prd_fops = {
	.open		= opal_prd_open,
	.mmap		= opal_prd_mmap,
	.poll		= opal_prd_poll,
	.read		= opal_prd_read,
	.write		= opal_prd_write,
	.unlocked_ioctl	= opal_prd_ioctl,
	.release	= opal_prd_release,
	.owner		= THIS_MODULE,
};

static struct miscdevice opal_prd_dev = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "opal-prd",
	.fops		= &opal_prd_fops,
};

/* opal interface */
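/*
 * OPAL message notifier: called for PRD and PRD2 messages. The payload is
 * copied into a freshly-allocated queue item (GFP_ATOMIC, since the notifier
 * must not sleep) and any reader waiting in poll/read is woken.
 */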
static int opal_prd_msg_notifier(struct notifier_block *nb,
		unsigned long msg_type, void *_msg)
{
	struct opal_prd_msg_queue_item *item;
	struct opal_prd_msg_header *hdr;
	struct opal_msg *msg = _msg;
	int msg_size, item_size;
	unsigned long flags;

	if (msg_type != OPAL_MSG_PRD && msg_type != OPAL_MSG_PRD2)
		return 0;

	/*
	 * Calculate total size of the message and item we need to store. The
	 * 'size' field in the header includes the header itself.
	 */
	hdr = (void *)msg->params;
	msg_size = be16_to_cpu(hdr->size);
	item_size = msg_size + sizeof(*item) - sizeof(item->msg);

	item = kzalloc(item_size, GFP_ATOMIC);
	if (!item)
		return -ENOMEM;

	memcpy(&item->msg, msg->params, msg_size);

	spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
	list_add_tail(&item->list, &opal_prd_msg_queue);
	spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);

	wake_up_interruptible(&opal_prd_msg_wait);

	return 0;
}

static struct notifier_block opal_prd_event_nb = {
	.notifier_call	= opal_prd_msg_notifier,
	.next		= NULL,
	.priority	= 0,
};

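/*
 * Bind to the ibm,opal-prd platform device: register for PRD/PRD2 OPAL
 * messages and expose the opal-prd miscdevice. Only one instance is
 * expected per machine.
 */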
static int opal_prd_probe(struct platform_device *pdev)
{
	int rc;

	if (!pdev || !pdev->dev.of_node)
		return -ENODEV;

	/*
	 * We should only have one prd driver instance per machine; ensure
	 * that we only get a valid probe on a single OF node.
	 */
	if (prd_node)
		return -EBUSY;

	prd_node = pdev->dev.of_node;

	rc = opal_message_notifier_register(OPAL_MSG_PRD, &opal_prd_event_nb);
	if (rc) {
		pr_err("Couldn't register event notifier\n");
		return rc;
	}

	rc = opal_message_notifier_register(OPAL_MSG_PRD2, &opal_prd_event_nb);
	if (rc) {
		pr_err("Couldn't register PRD2 event notifier\n");
		opal_message_notifier_unregister(OPAL_MSG_PRD,
				&opal_prd_event_nb);
		return rc;
	}

	rc = misc_register(&opal_prd_dev);
	if (rc) {
		pr_err("failed to register miscdev\n");
		opal_message_notifier_unregister(OPAL_MSG_PRD,
				&opal_prd_event_nb);
		opal_message_notifier_unregister(OPAL_MSG_PRD2,
				&opal_prd_event_nb);
		return rc;
	}

	return 0;
}

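/* Tear down in the reverse order of opal_prd_probe(). */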
static int opal_prd_remove(struct platform_device *pdev)
{
	misc_deregister(&opal_prd_dev);
	opal_message_notifier_unregister(OPAL_MSG_PRD, &opal_prd_event_nb);
	opal_message_notifier_unregister(OPAL_MSG_PRD2, &opal_prd_event_nb);
	return 0;
}

static const struct of_device_id opal_prd_match[] = {
	{ .compatible = "ibm,opal-prd" },
	{ },
};

static struct platform_driver opal_prd_driver = {
	.driver = {
		.name		= "opal-prd",
		.of_match_table	= opal_prd_match,
	},
	.probe	= opal_prd_probe,
	.remove	= opal_prd_remove,
};

module_platform_driver(opal_prd_driver);

MODULE_DEVICE_TABLE(of, opal_prd_match);
MODULE_DESCRIPTION("PowerNV OPAL runtime diagnostic driver");
MODULE_LICENSE("GPL");