/*
 * OPAL Runtime Diagnostics interface driver
 * Supported on POWERNV platform
 *
 * Copyright IBM Corporation 2015
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "opal-prd: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/poll.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/opal-prd.h>
#include <asm/opal.h>
#include <asm/io.h>
#include <asm/uaccess.h>

/**
 * The msg member must be at the end of the struct, as it's followed by the
 * message data.
 */
struct opal_prd_msg_queue_item {
	struct list_head		list;
	struct opal_prd_msg_header	msg;
};

static struct device_node *prd_node;
static LIST_HEAD(opal_prd_msg_queue);
static DEFINE_SPINLOCK(opal_prd_msg_queue_lock);
static DECLARE_WAIT_QUEUE_HEAD(opal_prd_msg_wait);
static atomic_t prd_usage;

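/*
 * opal_prd_range_is_valid - check that a userspace-requested range lies
 * entirely within one of the PRD regions described under /reserved-memory.
 * Only child nodes carrying an "ibm,prd-label" property are considered.
 */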
static bool opal_prd_range_is_valid(uint64_t addr, uint64_t size)
{
	struct device_node *parent, *node;
	bool found;

	if (addr + size < addr)
		return false;

	parent = of_find_node_by_path("/reserved-memory");
	if (!parent)
		return false;

	found = false;

	for_each_child_of_node(parent, node) {
		uint64_t range_addr, range_size, range_end;
		const __be32 *addrp;
		const char *label;

		addrp = of_get_address(node, 0, &range_size, NULL);
		if (!addrp)
			continue;

		range_addr = of_read_number(addrp, 2);
		range_end = range_addr + range_size;

		label = of_get_property(node, "ibm,prd-label", NULL);

		/* PRD ranges need a label */
		if (!label)
			continue;

		if (range_end <= range_addr)
			continue;

		if (addr >= range_addr && addr + size <= range_end) {
			found = true;
			of_node_put(node);
			break;
		}
	}

	of_node_put(parent);
	return found;
}

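/*
 * Only one process may use the PRD channel at a time; open() claims it by
 * atomically setting prd_usage, and a second opener gets -EBUSY.
 */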
static int opal_prd_open(struct inode *inode, struct file *file)
{
	/*
	 * Prevent multiple (separate) processes from concurrent interactions
	 * with the FW PRD channel
	 */
	if (atomic_xchg(&prd_usage, 1) == 1)
		return -EBUSY;

	return 0;
}

/*
 * opal_prd_mmap - maps firmware-provided ranges into userspace
 * @file: file structure for the device
 * @vma: VMA to map the memory range into
 */
static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t addr, size;
	pgprot_t page_prot;
	int rc;

	pr_devel("opal_prd_mmap(0x%016lx, 0x%016lx, 0x%lx, 0x%lx)\n",
			vma->vm_start, vma->vm_end, vma->vm_pgoff,
			vma->vm_flags);

	addr = vma->vm_pgoff << PAGE_SHIFT;
	size = vma->vm_end - vma->vm_start;

	/* ensure we're mapping within one of the allowable ranges */
	if (!opal_prd_range_is_valid(addr, size))
		return -EINVAL;

	page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
					 size, vma->vm_page_prot);

	rc = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size,
				page_prot);

	return rc;
}

static bool opal_msg_queue_empty(void)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
	ret = list_empty(&opal_prd_msg_queue);
	spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);

	return ret;
}

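/*
 * poll() reports the device readable once the OPAL message notifier has
 * queued at least one PRD message for userspace.
 */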
static unsigned int opal_prd_poll(struct file *file,
		struct poll_table_struct *wait)
{
	poll_wait(file, &opal_prd_msg_wait, wait);

	if (!opal_msg_queue_empty())
		return POLLIN | POLLRDNORM;

	return 0;
}

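/*
 * read() hands the oldest queued PRD message (header plus payload) to
 * userspace. The buffer must be at least a header in size; reads block
 * unless O_NONBLOCK is set. On a failed copy the message is re-queued at
 * the head of the list so it isn't lost.
 */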
static ssize_t opal_prd_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct opal_prd_msg_queue_item *item;
	unsigned long flags;
	ssize_t size, err;
	int rc;

	/* we need at least a header's worth of data */
	if (count < sizeof(item->msg))
		return -EINVAL;

	if (*ppos)
		return -ESPIPE;

	item = NULL;

	for (;;) {
		spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
		if (!list_empty(&opal_prd_msg_queue)) {
			item = list_first_entry(&opal_prd_msg_queue,
					struct opal_prd_msg_queue_item, list);
			list_del(&item->list);
		}
		spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);

		if (item)
			break;

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		rc = wait_event_interruptible(opal_prd_msg_wait,
				!opal_msg_queue_empty());
		if (rc)
			return -EINTR;
	}

	size = be16_to_cpu(item->msg.size);
	if (size > count) {
		err = -EINVAL;
		goto err_requeue;
	}

	rc = copy_to_user(buf, &item->msg, size);
	if (rc) {
		err = -EFAULT;
		goto err_requeue;
	}

	kfree(item);

	return size;

err_requeue:
	/* eep! re-queue at the head of the list */
	spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
	list_add(&item->list, &opal_prd_msg_queue);
	spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);
	return err;
}

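/*
 * write() takes a complete PRD message (header plus payload) from
 * userspace and passes it to firmware via opal_prd_msg(). The length to
 * send is taken from the message header itself.
 */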
static ssize_t opal_prd_write(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct opal_prd_msg_header hdr;
	ssize_t size;
	void *msg;
	int rc;

	size = sizeof(hdr);

	if (count < size)
		return -EINVAL;

	/* grab the header */
	rc = copy_from_user(&hdr, buf, sizeof(hdr));
	if (rc)
		return -EFAULT;

	size = be16_to_cpu(hdr.size);

	/*
	 * The header's size field must cover at least the header itself, and
	 * must not claim more data than userspace actually provided.
	 */
	if (size < sizeof(hdr) || size > count)
		return -EINVAL;

	msg = kmalloc(size, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	rc = copy_from_user(msg, buf, size);
	if (rc) {
		size = -EFAULT;
		goto out_free;
	}

	rc = opal_prd_msg(msg);
	if (rc) {
		pr_warn("write: opal_prd_msg returned %d\n", rc);
		size = -EIO;
	}

out_free:
	kfree(msg);

	return size;
}

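/*
 * release() tells firmware the PRD consumer is going away
 * (OPAL_PRD_MSG_TYPE_FINI) and drops the single-user claim taken in open().
 */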
static int opal_prd_release(struct inode *inode, struct file *file)
{
	struct opal_prd_msg_header msg;

	msg.size = cpu_to_be16(sizeof(msg));
	msg.type = OPAL_PRD_MSG_TYPE_FINI;

	opal_prd_msg((struct opal_prd_msg *)&msg);

	atomic_xchg(&prd_usage, 0);

	return 0;
}

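/*
 * ioctl() interface:
 *   OPAL_PRD_GET_INFO   - report the kernel-side PRD interface version
 *   OPAL_PRD_SCOM_READ  - read a SCOM register via opal_xscom_read()
 *   OPAL_PRD_SCOM_WRITE - write a SCOM register via opal_xscom_write()
 */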
static long opal_prd_ioctl(struct file *file, unsigned int cmd,
		unsigned long param)
{
	struct opal_prd_info info;
	struct opal_prd_scom scom;
	int rc = 0;

	switch (cmd) {
	case OPAL_PRD_GET_INFO:
		memset(&info, 0, sizeof(info));
		info.version = OPAL_PRD_KERNEL_VERSION;
		rc = copy_to_user((void __user *)param, &info, sizeof(info));
		if (rc)
			return -EFAULT;
		break;

	case OPAL_PRD_SCOM_READ:
		rc = copy_from_user(&scom, (void __user *)param, sizeof(scom));
		if (rc)
			return -EFAULT;

		scom.rc = opal_xscom_read(scom.chip, scom.addr,
				(__be64 *)&scom.data);
		scom.data = be64_to_cpu(scom.data);
		pr_devel("ioctl SCOM_READ: chip %llx addr %016llx data %016llx rc %lld\n",
				scom.chip, scom.addr, scom.data, scom.rc);

		rc = copy_to_user((void __user *)param, &scom, sizeof(scom));
		if (rc)
			return -EFAULT;
		break;

	case OPAL_PRD_SCOM_WRITE:
		rc = copy_from_user(&scom, (void __user *)param, sizeof(scom));
		if (rc)
			return -EFAULT;

		scom.rc = opal_xscom_write(scom.chip, scom.addr, scom.data);
		pr_devel("ioctl SCOM_WRITE: chip %llx addr %016llx data %016llx rc %lld\n",
				scom.chip, scom.addr, scom.data, scom.rc);

		rc = copy_to_user((void __user *)param, &scom, sizeof(scom));
		if (rc)
			return -EFAULT;
		break;

	default:
		rc = -EINVAL;
	}

	return rc;
}

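/*
 * Character device interface, exposed as /dev/opal-prd.
 *
 * A rough, illustrative sketch of the expected userspace interaction (the
 * real consumer is the opal-prd daemon; this is not taken from its sources):
 *
 *	fd = open("/dev/opal-prd", O_RDWR);
 *	ioctl(fd, OPAL_PRD_GET_INFO, &info);       then check info.version
 *	buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, off);
 *	poll(&pfd, 1, -1);                         wait for an incoming message
 *	read(fd, msgbuf, msglen);                  receive a PRD message
 *	write(fd, msgbuf, msglen);                 send a PRD message to FW
 */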
static const struct file_operations opal_prd_fops = {
	.open		= opal_prd_open,
	.mmap		= opal_prd_mmap,
	.poll		= opal_prd_poll,
	.read		= opal_prd_read,
	.write		= opal_prd_write,
	.unlocked_ioctl	= opal_prd_ioctl,
	.release	= opal_prd_release,
	.owner		= THIS_MODULE,
};

static struct miscdevice opal_prd_dev = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "opal-prd",
	.fops		= &opal_prd_fops,
};

/*
 * OPAL message notifier: incoming PRD messages from firmware are copied
 * into queue items and handed to userspace via read().
 */
static int opal_prd_msg_notifier(struct notifier_block *nb,
		unsigned long msg_type, void *_msg)
{
	struct opal_prd_msg_queue_item *item;
	struct opal_prd_msg_header *hdr;
	struct opal_msg *msg = _msg;
	int msg_size, item_size;
	unsigned long flags;

	if (msg_type != OPAL_MSG_PRD)
		return 0;

	/*
	 * Calculate total size of the message and item we need to store. The
	 * 'size' field in the header includes the header itself.
	 */
	hdr = (void *)msg->params;
	msg_size = be16_to_cpu(hdr->size);
	item_size = msg_size + sizeof(*item) - sizeof(item->msg);

	item = kzalloc(item_size, GFP_ATOMIC);
	if (!item)
		return -ENOMEM;

	memcpy(&item->msg, msg->params, msg_size);

	spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
	list_add_tail(&item->list, &opal_prd_msg_queue);
	spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);

	wake_up_interruptible(&opal_prd_msg_wait);

	return 0;
}

static struct notifier_block opal_prd_event_nb = {
	.notifier_call	= opal_prd_msg_notifier,
	.next		= NULL,
	.priority	= 0,
};

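/*
 * Platform driver glue: probe() registers the OPAL_MSG_PRD notifier and
 * the misc device; remove() tears both down in the opposite order.
 */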
static int opal_prd_probe(struct platform_device *pdev)
{
	int rc;

	if (!pdev || !pdev->dev.of_node)
		return -ENODEV;

	/*
	 * We should only have one prd driver instance per machine; ensure
	 * that we only get a valid probe on a single OF node.
	 */
	if (prd_node)
		return -EBUSY;

	prd_node = pdev->dev.of_node;

	rc = opal_message_notifier_register(OPAL_MSG_PRD, &opal_prd_event_nb);
	if (rc) {
		pr_err("Couldn't register event notifier\n");
		return rc;
	}

	rc = misc_register(&opal_prd_dev);
	if (rc) {
		pr_err("failed to register miscdev\n");
		opal_message_notifier_unregister(OPAL_MSG_PRD,
				&opal_prd_event_nb);
		return rc;
	}

	return 0;
}

static int opal_prd_remove(struct platform_device *pdev)
{
	misc_deregister(&opal_prd_dev);
	opal_message_notifier_unregister(OPAL_MSG_PRD, &opal_prd_event_nb);
	return 0;
}

static const struct of_device_id opal_prd_match[] = {
	{ .compatible = "ibm,opal-prd" },
	{ },
};

static struct platform_driver opal_prd_driver = {
	.driver = {
		.name		= "opal-prd",
		.of_match_table	= opal_prd_match,
	},
	.probe	= opal_prd_probe,
	.remove	= opal_prd_remove,
};

module_platform_driver(opal_prd_driver);

MODULE_DEVICE_TABLE(of, opal_prd_match);
MODULE_DESCRIPTION("PowerNV OPAL runtime diagnostic driver");
MODULE_LICENSE("GPL");