/*
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2012 Intel, Inc.
 * Copyright (C) 2013 Intel, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/* This source file contains the implementation of a special device driver
 * that intends to provide a *very* fast communication channel between the
 * guest system and the QEMU emulator.
 *
 * Usage from the guest is simply the following (error handling simplified):
 *
 *    int  fd = open("/dev/qemu_pipe", O_RDWR);
 *    .... write() or read() through the pipe.
 *
 * This driver doesn't deal with the exact protocol used during the session.
 * It is intended to be as simple as something like:
 *
 *    // do this _just_ after opening the fd to connect to a specific
 *    // emulator service.
 *    const char*  msg = "<pipename>";
 *    if (write(fd, msg, strlen(msg)+1) < 0) {
 *       ... could not connect to <pipename> service
 *       close(fd);
 *    }
 *
 *    // after this, simply read() and write() to communicate with the
 *    // service. Exact protocol details left as an exercise to the reader.
 *
 * This driver is very fast because it doesn't copy any data through
 * intermediate buffers, since the emulator is capable of translating
 * guest user addresses into host ones.
 *
 * Note that we must however ensure that each user page involved in the
 * exchange is properly mapped during a transfer.
 */
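
/* For illustration, here is a slightly more complete sketch of the guest
 * user-space side. The service name "my-service" and the payload used below
 * are purely hypothetical examples; the real service names and per-service
 * wire protocols are defined by the emulator, not by this driver.
 *
 *    static int pipe_connect(const char *service)
 *    {
 *        int fd = open("/dev/qemu_pipe", O_RDWR);
 *        if (fd < 0)
 *            return -1;
 *        // The first write selects the emulator-side service.
 *        if (write(fd, service, strlen(service) + 1) < 0) {
 *            close(fd);
 *            return -1;
 *        }
 *        return fd;
 *    }
 *
 *    // Connect, send a request and read the reply (hypothetical protocol).
 *    int fd = pipe_connect("my-service");
 *    if (fd >= 0) {
 *        char reply[64];
 *        write(fd, "hello", 5);
 *        read(fd, reply, sizeof(reply));
 *        close(fd);
 *    }
 */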

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/io.h>

/*
 * IMPORTANT: The following constants must match the ones used and defined
 * in external/qemu/hw/goldfish_pipe.c in the Android source tree.
 */

/* pipe device registers */
#define PIPE_REG_COMMAND		0x00  /* write: value = command */
#define PIPE_REG_STATUS			0x04  /* read */
#define PIPE_REG_CHANNEL		0x08  /* read/write: channel id */
#define PIPE_REG_SIZE			0x0c  /* read/write: buffer size */
#define PIPE_REG_ADDRESS		0x10  /* write: physical address */
#define PIPE_REG_WAKES			0x14  /* read: wake flags */
#define PIPE_REG_PARAMS_ADDR_LOW	0x18  /* read/write: batch data address */
#define PIPE_REG_PARAMS_ADDR_HIGH	0x1c  /* read/write: batch data address */
#define PIPE_REG_ACCESS_PARAMS		0x20  /* write: batch access */
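
/* A single (non-batched) command is issued by selecting the channel and
 * then writing the command index, both under the device spinlock; any
 * result is read back from PIPE_REG_STATUS. As a sketch of what
 * goldfish_cmd_status() below does (not an additional interface):
 *
 *    writel((u32)pipe, dev->base + PIPE_REG_CHANNEL);  // select the pipe
 *    writel(CMD_POLL, dev->base + PIPE_REG_COMMAND);   // run the command
 *    status = readl(dev->base + PIPE_REG_STATUS);      // fetch the result
 */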

/* list of commands for PIPE_REG_COMMAND */
#define CMD_OPEN			1  /* open new channel */
#define CMD_CLOSE			2  /* close channel (from guest) */
#define CMD_POLL			3  /* poll read/write status */

/* List of bitflags returned in status of CMD_POLL command */
#define PIPE_POLL_IN			(1 << 0)
#define PIPE_POLL_OUT			(1 << 1)
#define PIPE_POLL_HUP			(1 << 2)

/* The following commands are related to write operations */
#define CMD_WRITE_BUFFER	4  /* send a user buffer to the emulator */
#define CMD_WAKE_ON_WRITE	5  /* tell the emulator to wake us when writing
				     is possible */

/* The following commands are related to read operations; they must be
 * listed in the same order as the corresponding write ones, since we
 * will use (CMD_READ_BUFFER - CMD_WRITE_BUFFER) as a special offset
 * in goldfish_pipe_read_write() below.
 */
#define CMD_READ_BUFFER        6  /* receive a user buffer from the emulator */
#define CMD_WAKE_ON_READ       7  /* tell the emulator to wake us when reading
				   * is possible */

/* Possible status values used to signal errors - see goldfish_pipe_error_convert */
#define PIPE_ERROR_INVAL       -1
#define PIPE_ERROR_AGAIN       -2
#define PIPE_ERROR_NOMEM       -3
#define PIPE_ERROR_IO          -4

/* Bit-flags used to signal events from the emulator */
#define PIPE_WAKE_CLOSED       (1 << 0)  /* emulator closed pipe */
#define PIPE_WAKE_READ         (1 << 1)  /* pipe can now be read from */
#define PIPE_WAKE_WRITE        (1 << 2)  /* pipe can now be written to */

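/* The batch ("access params") interface: rather than programming CHANNEL,
 * SIZE and ADDRESS through three separate register writes for every
 * transfer, the guest fills in one of these structures (whose physical
 * address is handed to the device at probe time through
 * PIPE_REG_PARAMS_ADDR_LOW/HIGH) and triggers the whole transfer with a
 * single write to PIPE_REG_ACCESS_PARAMS. The emulator writes the transfer
 * status back into the 'result' field. See access_with_param() below.
 */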
struct access_params {
	u32 channel;
	u32 size;
	u32 address;
	u32 cmd;
	u32 result;
	/* reserved for future extension */
	u32 flags;
};

/* The global driver data. Holds a reference to the i/o page used to
 * communicate with the emulator, and a wake queue for blocked tasks
 * waiting to be awoken.
 */
struct goldfish_pipe_dev {
	spinlock_t lock;
	unsigned char __iomem *base;
	struct access_params *aps;
	int irq;
};

static struct goldfish_pipe_dev   pipe_dev[1];

/* This data type models a given pipe instance */
struct goldfish_pipe {
	struct goldfish_pipe_dev *dev;
	struct mutex lock;
	unsigned long flags;
	wait_queue_head_t wake_queue;
};


/* Bit flags for the 'flags' field */
enum {
	BIT_CLOSED_ON_HOST = 0,  /* pipe closed by host */
	BIT_WAKE_ON_WRITE  = 1,  /* want to be woken on writes */
	BIT_WAKE_ON_READ   = 2,  /* want to be woken on reads */
};


static u32 goldfish_cmd_status(struct goldfish_pipe *pipe, u32 cmd)
{
	unsigned long flags;
	u32 status;
	struct goldfish_pipe_dev *dev = pipe->dev;

	spin_lock_irqsave(&dev->lock, flags);
	writel((u32)pipe, dev->base + PIPE_REG_CHANNEL);
	writel(cmd, dev->base + PIPE_REG_COMMAND);
	status = readl(dev->base + PIPE_REG_STATUS);
	spin_unlock_irqrestore(&dev->lock, flags);
	return status;
}

static void goldfish_cmd(struct goldfish_pipe *pipe, u32 cmd)
{
	unsigned long flags;
	struct goldfish_pipe_dev *dev = pipe->dev;

	spin_lock_irqsave(&dev->lock, flags);
	writel((u32)pipe, dev->base + PIPE_REG_CHANNEL);
	writel(cmd, dev->base + PIPE_REG_COMMAND);
	spin_unlock_irqrestore(&dev->lock, flags);
}

/* This function converts an error code returned by the emulator through
 * the PIPE_REG_STATUS i/o register into a valid negative errno value.
 */
static int goldfish_pipe_error_convert(int status)
{
	switch (status) {
	case PIPE_ERROR_AGAIN:
		return -EAGAIN;
	case PIPE_ERROR_NOMEM:
		return -ENOMEM;
	case PIPE_ERROR_IO:
		return -EIO;
	default:
		return -EINVAL;
	}
}

/*
 * Note: QEMU returns 0 for any access to an unknown register, which is how
 * we detect whether the batch (access_params) interface is supported.
 */
static int valid_batchbuffer_addr(struct goldfish_pipe_dev *dev,
				  struct access_params *aps)
{
	u32 aph, apl;
	u64 paddr;
	aph = readl(dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
	apl = readl(dev->base + PIPE_REG_PARAMS_ADDR_LOW);

	paddr = ((u64)aph << 32) | apl;
	if (paddr != (__pa(aps)))
		return 0;
	return 1;
}

/* 0 on success */
static int setup_access_params_addr(struct platform_device *pdev,
					struct goldfish_pipe_dev *dev)
{
	u64 paddr;
	struct access_params *aps;

	aps = devm_kzalloc(&pdev->dev, sizeof(struct access_params), GFP_KERNEL);
	if (!aps)
		return -1;

	/* FIXME */
	paddr = __pa(aps);
	writel((u32)(paddr >> 32), dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
	writel((u32)paddr, dev->base + PIPE_REG_PARAMS_ADDR_LOW);

	if (valid_batchbuffer_addr(dev, aps)) {
		dev->aps = aps;
		return 0;
	} else
		return -1;
}

/* A value that will never be set by the QEMU emulator */
#define INITIAL_BATCH_RESULT (0xdeadbeaf)
static int access_with_param(struct goldfish_pipe_dev *dev, const int cmd,
				unsigned long address, unsigned long avail,
				struct goldfish_pipe *pipe, int *status)
{
	struct access_params *aps = dev->aps;

	if (aps == NULL)
		return -1;

	aps->result = INITIAL_BATCH_RESULT;
	aps->channel = (unsigned long)pipe;
	aps->size = avail;
	aps->address = address;
	aps->cmd = cmd;
	writel(cmd, dev->base + PIPE_REG_ACCESS_PARAMS);
	/*
	 * If the aps->result has not changed, that means
	 * that the batch command failed
	 */
	if (aps->result == INITIAL_BATCH_RESULT)
		return -1;
	*status = aps->result;
	return 0;
}

/* This function is used for both reading from and writing to a given
 * pipe.
 */
static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
				    size_t bufflen, int is_write)
{
	unsigned long irq_flags;
	struct goldfish_pipe *pipe = filp->private_data;
	struct goldfish_pipe_dev *dev = pipe->dev;
	const int cmd_offset = is_write ? 0
					: (CMD_READ_BUFFER - CMD_WRITE_BUFFER);
	unsigned long address, address_end;
	int ret = 0;

	/* If the emulator already closed the pipe, no need to go further */
	if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
		return -EIO;

	/* Null reads or writes succeed */
	if (unlikely(bufflen == 0))
		return 0;

	/* Check the buffer range for access */
	if (!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ,
			buffer, bufflen))
		return -EFAULT;

	/* Serialize access to the pipe */
	if (mutex_lock_interruptible(&pipe->lock))
		return -ERESTARTSYS;

	address = (unsigned long)(void *)buffer;
	address_end = address + bufflen;

	while (address < address_end) {
		unsigned long  page_end = (address & PAGE_MASK) + PAGE_SIZE;
		unsigned long  next     = page_end < address_end ? page_end
								 : address_end;
		unsigned long  avail    = next - address;
		int status, wakeBit;

		/* Ensure that the corresponding page is properly mapped */
		/* FIXME: this isn't safe or sufficient - use get_user_pages */
		if (is_write) {
			char c;
			/* Ensure that the page is mapped and readable */
			if (__get_user(c, (char __user *)address)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
		} else {
			/* Ensure that the page is mapped and writable */
			if (__put_user(0, (char __user *)address)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
		}

		/* Now, try to transfer the bytes in the current page */
		spin_lock_irqsave(&dev->lock, irq_flags);
		if (access_with_param(dev, CMD_WRITE_BUFFER + cmd_offset,
				address, avail, pipe, &status)) {
			writel((u32)pipe, dev->base + PIPE_REG_CHANNEL);
			writel(avail, dev->base + PIPE_REG_SIZE);
			writel(address, dev->base + PIPE_REG_ADDRESS);
			writel(CMD_WRITE_BUFFER + cmd_offset,
					dev->base + PIPE_REG_COMMAND);
			status = readl(dev->base + PIPE_REG_STATUS);
		}
		spin_unlock_irqrestore(&dev->lock, irq_flags);

		if (status > 0) { /* Correct transfer */
			ret += status;
			address += status;
			continue;
		}

		if (status == 0)  /* EOF */
			break;

		/* An error occurred. If we already transferred some data,
		 * just return its count. We expect the next call to return
		 * an error code. */
		if (ret > 0)
			break;

		/* If the error is not PIPE_ERROR_AGAIN, or if we are in
		 * non-blocking mode, just return the error code.
		 */
		if (status != PIPE_ERROR_AGAIN ||
			(filp->f_flags & O_NONBLOCK) != 0) {
			ret = goldfish_pipe_error_convert(status);
			break;
		}

		/* We will have to wait until more data/space is available.
		 * First, mark the pipe as waiting for a specific wake signal.
		 */
		wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
		set_bit(wakeBit, &pipe->flags);

		/* Tell the emulator we're going to wait for a wake event */
		goldfish_cmd(pipe, CMD_WAKE_ON_WRITE + cmd_offset);

		/* Unlock the pipe, then wait for the wake signal */
		mutex_unlock(&pipe->lock);

		while (test_bit(wakeBit, &pipe->flags)) {
			if (wait_event_interruptible(
					pipe->wake_queue,
					!test_bit(wakeBit, &pipe->flags)))
				return -ERESTARTSYS;

			if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
				return -EIO;
		}

		/* Try to re-acquire the lock */
		if (mutex_lock_interruptible(&pipe->lock))
			return -ERESTARTSYS;

		/* Try the transfer again */
		continue;
	}
	mutex_unlock(&pipe->lock);
	return ret;
}

static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
			      size_t bufflen, loff_t *ppos)
{
	return goldfish_pipe_read_write(filp, buffer, bufflen, 0);
}

static ssize_t goldfish_pipe_write(struct file *filp,
				const char __user *buffer, size_t bufflen,
				loff_t *ppos)
{
	return goldfish_pipe_read_write(filp, (char __user *)buffer,
								bufflen, 1);
}


static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait)
{
	struct goldfish_pipe *pipe = filp->private_data;
	unsigned int mask = 0;
	int status;

	mutex_lock(&pipe->lock);

	poll_wait(filp, &pipe->wake_queue, wait);

	status = goldfish_cmd_status(pipe, CMD_POLL);

	mutex_unlock(&pipe->lock);

	if (status & PIPE_POLL_IN)
		mask |= POLLIN | POLLRDNORM;

	if (status & PIPE_POLL_OUT)
		mask |= POLLOUT | POLLWRNORM;

	if (status & PIPE_POLL_HUP)
		mask |= POLLHUP;

	if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
		mask |= POLLERR;

	return mask;
}

static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
{
	struct goldfish_pipe_dev *dev = dev_id;
	unsigned long irq_flags;
	int count = 0;

	/* We're going to read from the emulator a list of (channel, flags)
	 * pairs corresponding to the wake events that occurred on each
	 * blocked pipe (i.e. channel).
	 */
	spin_lock_irqsave(&dev->lock, irq_flags);
	for (;;) {
		/* First read the channel, 0 means the end of the list */
		struct goldfish_pipe *pipe;
		unsigned long wakes;
		unsigned long channel = readl(dev->base + PIPE_REG_CHANNEL);

		if (channel == 0)
			break;

		/* Convert channel to struct pipe pointer + read wake flags */
		wakes = readl(dev->base + PIPE_REG_WAKES);
		pipe  = (struct goldfish_pipe *)(ptrdiff_t)channel;

		/* Did the emulator just close a pipe? */
		if (wakes & PIPE_WAKE_CLOSED) {
			set_bit(BIT_CLOSED_ON_HOST, &pipe->flags);
			wakes |= PIPE_WAKE_READ | PIPE_WAKE_WRITE;
		}
		if (wakes & PIPE_WAKE_READ)
			clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
		if (wakes & PIPE_WAKE_WRITE)
			clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);

		wake_up_interruptible(&pipe->wake_queue);
		count++;
	}
	spin_unlock_irqrestore(&dev->lock, irq_flags);

	return (count == 0) ? IRQ_NONE : IRQ_HANDLED;
}

/**
 *	goldfish_pipe_open	-	open a channel to the AVD
 *	@inode: inode of device
 *	@file: file struct of opener
 *
 *	Create a new pipe link between the emulator and the user application.
 *	Each new request produces a new pipe.
 *
 *	Note: we use the pipe object's address as the channel ID (a mux key).
 *	All goldfish emulations are 32-bit right now, so this is fine; a move
 *	to 64-bit will need to change this addressing scheme.
 */
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
	struct goldfish_pipe *pipe;
	struct goldfish_pipe_dev *dev = pipe_dev;
	int32_t status;

	/* Allocate new pipe kernel object */
	pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
	if (pipe == NULL)
		return -ENOMEM;

	pipe->dev = dev;
	mutex_init(&pipe->lock);
	init_waitqueue_head(&pipe->wake_queue);

	/*
	 * Now, tell the emulator we're opening a new pipe. We use the
	 * pipe object's address as the channel identifier for simplicity.
	 */

	status = goldfish_cmd_status(pipe, CMD_OPEN);
	if (status < 0) {
		kfree(pipe);
		return status;
	}

	/* All is done, save the pipe into the file's private data field */
	file->private_data = pipe;
	return 0;
}

static int goldfish_pipe_release(struct inode *inode, struct file *filp)
{
	struct goldfish_pipe *pipe = filp->private_data;

	/* The guest is closing the channel, so tell the emulator right now */
	goldfish_cmd(pipe, CMD_CLOSE);
	kfree(pipe);
	filp->private_data = NULL;
	return 0;
}

static const struct file_operations goldfish_pipe_fops = {
	.owner = THIS_MODULE,
	.read = goldfish_pipe_read,
	.write = goldfish_pipe_write,
	.poll = goldfish_pipe_poll,
	.open = goldfish_pipe_open,
	.release = goldfish_pipe_release,
};

static struct miscdevice goldfish_pipe_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "goldfish_pipe",
	.fops = &goldfish_pipe_fops,
};

static int goldfish_pipe_probe(struct platform_device *pdev)
{
	int err;
	struct resource *r;
	struct goldfish_pipe_dev *dev = pipe_dev;

	/* not thread safe, but this should not happen */
	WARN_ON(dev->base != NULL);

	spin_lock_init(&dev->lock);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL || resource_size(r) < PAGE_SIZE) {
		dev_err(&pdev->dev, "can't allocate i/o page\n");
		return -EINVAL;
	}
	dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
	if (dev->base == NULL) {
		dev_err(&pdev->dev, "ioremap failed\n");
		return -EINVAL;
	}

	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (r == NULL) {
		err = -EINVAL;
		goto error;
	}
	dev->irq = r->start;

	err = devm_request_irq(&pdev->dev, dev->irq, goldfish_pipe_interrupt,
				IRQF_SHARED, "goldfish_pipe", dev);
	if (err) {
		dev_err(&pdev->dev, "unable to allocate IRQ\n");
		goto error;
	}

	err = misc_register(&goldfish_pipe_device);
	if (err) {
		dev_err(&pdev->dev, "unable to register device\n");
		goto error;
	}
	setup_access_params_addr(pdev, dev);
	return 0;

error:
	dev->base = NULL;
	return err;
}

static int goldfish_pipe_remove(struct platform_device *pdev)
{
	struct goldfish_pipe_dev *dev = pipe_dev;
	misc_deregister(&goldfish_pipe_device);
	dev->base = NULL;
	return 0;
}

static struct platform_driver goldfish_pipe = {
	.probe = goldfish_pipe_probe,
	.remove = goldfish_pipe_remove,
	.driver = {
		.name = "goldfish_pipe"
	}
};

module_platform_driver(goldfish_pipe);
MODULE_AUTHOR("David Turner <digit@google.com>");
MODULE_LICENSE("GPL");