/*
 * Copyright (C) 2012 Intel, Inc.
 * Copyright (C) 2013 Intel, Inc.
 * Copyright (C) 2014 Linaro Limited
 * Copyright (C) 2011-2016 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/* This source file contains the implementation of a special device driver
 * that intends to provide a *very* fast communication channel between the
 * guest system and the QEMU emulator.
 *
 * Usage from the guest is simply the following (error handling simplified):
 *
 *	int fd = open("/dev/qemu_pipe", O_RDWR);
 *	.... write() or read() through the pipe.
 *
 * This driver doesn't deal with the exact protocol used during the session.
 * It is intended to be as simple as something like:
 *
 *	// do this _just_ after opening the fd to connect to a specific
 *	// emulator service.
 *	const char *msg = "<pipename>";
 *	if (write(fd, msg, strlen(msg) + 1) < 0) {
 *		... could not connect to <pipename> service
 *		close(fd);
 *	}
 *
 *	// after this, simply read() and write() to communicate with the
 *	// service. Exact protocol details left as an exercise to the reader.
 *
 * This driver is very fast because it doesn't copy any data through
 * intermediate buffers, since the emulator is capable of translating
 * guest user addresses into host ones.
 *
 * Note that we must however ensure that each user page involved in the
 * exchange is properly mapped during a transfer.
 */
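
/* For illustration only (not part of the driver): a minimal userspace
 * sketch of the connection convention described above. The "pingpong"
 * service name and the helper below are hypothetical; which services
 * exist depends on the emulator build.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int open_pipe_service(const char *name)
 *	{
 *		int fd = open("/dev/qemu_pipe", O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		// The service name, including its terminating NUL, is the
 *		// first thing written into a freshly opened pipe.
 *		if (write(fd, name, strlen(name) + 1) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		return fd;
 *	}
 *
 *	// Usage, assuming a hypothetical "pingpong" service:
 *	//	int fd = open_pipe_service("pingpong");
 *	//	write(fd, buf, len);
 *	//	read(fd, buf, len);
 */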

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/goldfish.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/acpi.h>

/*
 * Update this when something changes in the driver's behavior so the host
 * can benefit from knowing it
 */
enum {
	PIPE_DRIVER_VERSION = 2,
	PIPE_CURRENT_DEVICE_VERSION = 2
};

/*
 * IMPORTANT: The following constants must match the ones used and defined
 * in external/qemu/hw/goldfish_pipe.c in the Android source tree.
 */

/* List of bitflags returned in status of CMD_POLL command */
enum PipePollFlags {
	PIPE_POLL_IN	= 1 << 0,
	PIPE_POLL_OUT	= 1 << 1,
	PIPE_POLL_HUP	= 1 << 2
};

/* Possible status values used to signal errors - see goldfish_pipe_error_convert */
enum PipeErrors {
	PIPE_ERROR_INVAL	= -1,
	PIPE_ERROR_AGAIN	= -2,
	PIPE_ERROR_NOMEM	= -3,
	PIPE_ERROR_IO		= -4
};

/* Bit-flags used to signal events from the emulator */
enum PipeWakeFlags {
	PIPE_WAKE_CLOSED = 1 << 0,	/* emulator closed pipe */
	PIPE_WAKE_READ = 1 << 1,	/* pipe can now be read from */
	PIPE_WAKE_WRITE = 1 << 2	/* pipe can now be written to */
};

/* Bit flags for the 'flags' field */
enum PipeFlagsBits {
	BIT_CLOSED_ON_HOST = 0,	/* pipe closed by host */
	BIT_WAKE_ON_WRITE = 1,	/* want to be woken on writes */
	BIT_WAKE_ON_READ = 2,	/* want to be woken on reads */
};

enum PipeRegs {
	PIPE_REG_CMD = 0,

	PIPE_REG_SIGNAL_BUFFER_HIGH = 4,
	PIPE_REG_SIGNAL_BUFFER = 8,
	PIPE_REG_SIGNAL_BUFFER_COUNT = 12,

	PIPE_REG_OPEN_BUFFER_HIGH = 20,
	PIPE_REG_OPEN_BUFFER = 24,

	PIPE_REG_VERSION = 36,

	PIPE_REG_GET_SIGNALLED = 48,
};

enum PipeCmdCode {
	PIPE_CMD_OPEN = 1,	/* to be used by the pipe device itself */
	PIPE_CMD_CLOSE,
	PIPE_CMD_POLL,
	PIPE_CMD_WRITE,
	PIPE_CMD_WAKE_ON_WRITE,
	PIPE_CMD_READ,
	PIPE_CMD_WAKE_ON_READ,

	/*
	 * TODO(zyy): implement a deferred read/write execution to allow
	 * parallel processing of pipe operations on the host.
	 */
	PIPE_CMD_WAKE_ON_DONE_IO,
};

enum {
	MAX_BUFFERS_PER_COMMAND = 336,
	MAX_SIGNALLED_PIPES = 64,
	INITIAL_PIPES_CAPACITY = 64
};

struct goldfish_pipe_dev;
struct goldfish_pipe;
struct goldfish_pipe_command;

/* A per-pipe command structure, shared with the host */
struct goldfish_pipe_command {
	s32 cmd;	/* PipeCmdCode, guest -> host */
	s32 id;		/* pipe id, guest -> host */
	s32 status;	/* command execution status, host -> guest */
	s32 reserved;	/* to pad to 64-bit boundary */
	union {
		/* Parameters for PIPE_CMD_{READ,WRITE} */
		struct {
			/* number of buffers, guest -> host */
			u32 buffers_count;
			/* number of consumed bytes, host -> guest */
			s32 consumed_size;
			/* buffer pointers, guest -> host */
			u64 ptrs[MAX_BUFFERS_PER_COMMAND];
			/* buffer sizes, guest -> host */
			u32 sizes[MAX_BUFFERS_PER_COMMAND];
		} rw_params;
	};
};

/* A single signalled pipe information */
struct signalled_pipe_buffer {
	u32 id;
	u32 flags;
};

/* Parameters for the PIPE_CMD_OPEN command */
struct open_command_param {
	u64 command_buffer_ptr;
	u32 rw_params_max_count;
};

/* Device-level set of buffers shared with the host */
struct goldfish_pipe_dev_buffers {
	struct open_command_param open_command_params;
	struct signalled_pipe_buffer signalled_pipe_buffers[
		MAX_SIGNALLED_PIPES];
};

/* This data type models a given pipe instance */
struct goldfish_pipe {
	/* pipe ID - index into goldfish_pipe_dev::pipes array */
	u32 id;
	/* The wake flags the pipe is waiting for
	 * Note: not protected with any lock, uses atomic operations
	 * and barriers to make it thread-safe.
	 */
	unsigned long flags;
	/* Wake flags the host has signalled,
	 * - protected by goldfish_pipe_dev::lock
	 */
	unsigned long signalled_flags;

	/* A pointer to command buffer */
	struct goldfish_pipe_command *command_buffer;

	/* doubly linked list of signalled pipes, protected by
	 * goldfish_pipe_dev::lock
	 */
	struct goldfish_pipe *prev_signalled;
	struct goldfish_pipe *next_signalled;

	/*
	 * A pipe's own lock. Protects the following:
	 *  - *command_buffer - makes sure a command can safely write its
	 *    parameters to the host and read the results back.
	 */
	struct mutex lock;

	/* A wake queue for sleeping until host signals an event */
	wait_queue_head_t wake_queue;
	/* Pointer to the parent goldfish_pipe_dev instance */
	struct goldfish_pipe_dev *dev;
};

/* The global driver data. Holds a reference to the i/o page used to
 * communicate with the emulator, and a wake queue for blocked tasks
 * waiting to be awoken.
 */
struct goldfish_pipe_dev {
	/*
	 * Global device spinlock. Protects the following members:
	 *  - pipes, pipes_capacity
	 *  - [*pipes, *pipes + pipes_capacity) - array data
	 *  - first_signalled_pipe,
	 *      goldfish_pipe::prev_signalled,
	 *      goldfish_pipe::next_signalled,
	 *      goldfish_pipe::signalled_flags - all signalled-related fields,
	 *      in all allocated pipes
	 *  - open_command_params - PIPE_CMD_OPEN-related buffers
	 *
	 * It looks like a lot of different fields, but the trick is that
	 * the only operation that happens often is the signalled pipes array
	 * manipulation. That's why it's OK for now to keep the rest of the
	 * fields under the same lock. If we notice too much contention because
	 * of PIPE_CMD_OPEN, then we should add a separate lock there.
	 */
	spinlock_t lock;

	/*
	 * Array of the pipes of |pipes_capacity| elements,
	 * indexed by goldfish_pipe::id
	 */
	struct goldfish_pipe **pipes;
	u32 pipes_capacity;

	/* Pointers to the buffers host uses for interaction with this driver */
	struct goldfish_pipe_dev_buffers *buffers;

	/* Head of a doubly linked list of signalled pipes */
	struct goldfish_pipe *first_signalled_pipe;

	/* Some device-specific data */
	int irq;
	int version;
	unsigned char __iomem *base;
};

static struct goldfish_pipe_dev pipe_dev[1] = {};

static int goldfish_cmd_locked(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
{
	pipe->command_buffer->cmd = cmd;
	/* failure by default */
	pipe->command_buffer->status = PIPE_ERROR_INVAL;
	/* Writing the pipe id to the command register makes the host execute
	 * the command and fill in command_buffer->status before the write
	 * completes, so the status can be read back right away.
	 */
	writel(pipe->id, pipe->dev->base + PIPE_REG_CMD);
	return pipe->command_buffer->status;
}

static int goldfish_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
{
	int status;

	if (mutex_lock_interruptible(&pipe->lock))
		return PIPE_ERROR_IO;
	status = goldfish_cmd_locked(pipe, cmd);
	mutex_unlock(&pipe->lock);
	return status;
}

/*
 * This function converts an error code returned by the emulator through
 * the command buffer's status field into a valid negative errno value.
 */
static int goldfish_pipe_error_convert(int status)
{
	switch (status) {
	case PIPE_ERROR_AGAIN:
		return -EAGAIN;
	case PIPE_ERROR_NOMEM:
		return -ENOMEM;
	case PIPE_ERROR_IO:
		return -EIO;
	default:
		return -EINVAL;
	}
}

static int pin_user_pages(unsigned long first_page, unsigned long last_page,
	unsigned int last_page_size, int is_write,
	struct page *pages[MAX_BUFFERS_PER_COMMAND],
	unsigned int *iter_last_page_size)
{
	int ret;
	int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;

	if (requested_pages > MAX_BUFFERS_PER_COMMAND) {
		requested_pages = MAX_BUFFERS_PER_COMMAND;
		*iter_last_page_size = PAGE_SIZE;
	} else {
		*iter_last_page_size = last_page_size;
	}

	ret = get_user_pages_fast(
			first_page, requested_pages, !is_write, pages);
	if (ret <= 0)
		return -EFAULT;
	if (ret < requested_pages)
		*iter_last_page_size = PAGE_SIZE;
	return ret;
}

static void release_user_pages(struct page **pages, int pages_count,
	int is_write, s32 consumed_size)
{
	int i;

	for (i = 0; i < pages_count; i++) {
		if (!is_write && consumed_size > 0)
			set_page_dirty(pages[i]);
		put_page(pages[i]);
	}
}

/* Populate the call parameters, merging adjacent pages together */
static void populate_rw_params(
	struct page **pages, int pages_count,
	unsigned long address, unsigned long address_end,
	unsigned long first_page, unsigned long last_page,
	unsigned int iter_last_page_size, int is_write,
	struct goldfish_pipe_command *command)
{
	/*
	 * Process the first page separately - it's the only page that
	 * needs special handling for its start address.
	 */
	unsigned long xaddr = page_to_phys(pages[0]);
	unsigned long xaddr_prev = xaddr;
	int buffer_idx = 0;
	int i = 1;
	int size_on_page = first_page == last_page
			? (int)(address_end - address)
			: (PAGE_SIZE - (address & ~PAGE_MASK));
	command->rw_params.ptrs[0] = (u64)(xaddr | (address & ~PAGE_MASK));
	command->rw_params.sizes[0] = size_on_page;
	for (; i < pages_count; ++i) {
		xaddr = page_to_phys(pages[i]);
		size_on_page = (i == pages_count - 1) ?
			iter_last_page_size : PAGE_SIZE;
		if (xaddr == xaddr_prev + PAGE_SIZE) {
			command->rw_params.sizes[buffer_idx] += size_on_page;
		} else {
			++buffer_idx;
			command->rw_params.ptrs[buffer_idx] = (u64)xaddr;
			command->rw_params.sizes[buffer_idx] = size_on_page;
		}
		xaddr_prev = xaddr;
	}
	command->rw_params.buffers_count = buffer_idx + 1;
}
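
/* A worked example of the merging above, with hypothetical numbers: a
 * transfer spanning three user pages, starting 0x100 bytes into the first
 * page, where the first two pages happen to be physically contiguous but
 * the third is not, is described to the host as two buffers:
 *
 *	ptrs[0] = phys(page0) + 0x100, sizes[0] = (PAGE_SIZE - 0x100) + PAGE_SIZE
 *	ptrs[1] = phys(page2),         sizes[1] = iter_last_page_size
 *
 * so rw_params.buffers_count ends up being 2 rather than 3.
 */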

static int transfer_max_buffers(struct goldfish_pipe *pipe,
	unsigned long address, unsigned long address_end, int is_write,
	unsigned long last_page, unsigned int last_page_size,
	s32 *consumed_size, int *status)
{
	static struct page *pages[MAX_BUFFERS_PER_COMMAND];
	unsigned long first_page = address & PAGE_MASK;
	unsigned int iter_last_page_size;
	int pages_count = pin_user_pages(first_page, last_page,
			last_page_size, is_write,
			pages, &iter_last_page_size);

	if (pages_count < 0)
		return pages_count;

	/* Serialize access to the pipe command buffers */
	if (mutex_lock_interruptible(&pipe->lock))
		return -ERESTARTSYS;

	populate_rw_params(pages, pages_count, address, address_end,
		first_page, last_page, iter_last_page_size, is_write,
		pipe->command_buffer);

	/* Transfer the data */
	*status = goldfish_cmd_locked(pipe,
			is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ);

	*consumed_size = pipe->command_buffer->rw_params.consumed_size;

	release_user_pages(pages, pages_count, is_write, *consumed_size);

	mutex_unlock(&pipe->lock);

	return 0;
}

static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
{
	u32 wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;

	set_bit(wakeBit, &pipe->flags);

	/* Tell the emulator we're going to wait for a wake event */
	(void)goldfish_cmd(pipe,
		is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ);

	while (test_bit(wakeBit, &pipe->flags)) {
		if (wait_event_interruptible(
				pipe->wake_queue,
				!test_bit(wakeBit, &pipe->flags)))
			return -ERESTARTSYS;

		if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
			return -EIO;
	}

	return 0;
}

static ssize_t goldfish_pipe_read_write(struct file *filp,
	char __user *buffer, size_t bufflen, int is_write)
{
	struct goldfish_pipe *pipe = filp->private_data;
	int count = 0, ret = -EINVAL;
	unsigned long address, address_end, last_page;
	unsigned int last_page_size;

	/* If the emulator already closed the pipe, no need to go further */
	if (unlikely(test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)))
		return -EIO;
	/* Null reads or writes succeed */
	if (unlikely(bufflen == 0))
		return 0;
	/* Check the buffer range for access */
	if (unlikely(!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ,
			buffer, bufflen)))
		return -EFAULT;

	address = (unsigned long)buffer;
	address_end = address + bufflen;
	last_page = (address_end - 1) & PAGE_MASK;
	last_page_size = ((address_end - 1) & ~PAGE_MASK) + 1;

	while (address < address_end) {
		s32 consumed_size;
		int status;

		ret = transfer_max_buffers(pipe, address, address_end, is_write,
				last_page, last_page_size, &consumed_size,
				&status);
		if (ret < 0)
			break;

		if (consumed_size > 0) {
			/* No matter what the status is, we've transferred
			 * something.
			 */
			count += consumed_size;
			address += consumed_size;
		}
		if (status > 0)
			continue;
		if (status == 0) {
			/* EOF */
			ret = 0;
			break;
		}
		if (count > 0) {
			/*
			 * An error occurred, but we already transferred
			 * something on one of the previous iterations.
			 * Just return what we already copied and log this
			 * error.
			 */
			if (status != PIPE_ERROR_AGAIN)
				pr_info_ratelimited("goldfish_pipe: backend error %d on %s\n",
					status, is_write ? "write" : "read");
			break;
		}

		/*
		 * If the error is not PIPE_ERROR_AGAIN, or if we are in
		 * non-blocking mode, just return the error code.
		 */
		if (status != PIPE_ERROR_AGAIN ||
			(filp->f_flags & O_NONBLOCK) != 0) {
			ret = goldfish_pipe_error_convert(status);
			break;
		}

		status = wait_for_host_signal(pipe, is_write);
		if (status < 0)
			return status;
	}

	if (count > 0)
		return count;
	return ret;
}

static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
	size_t bufflen, loff_t *ppos)
{
	return goldfish_pipe_read_write(filp, buffer, bufflen,
			/* is_write */ 0);
}

static ssize_t goldfish_pipe_write(struct file *filp,
	const char __user *buffer, size_t bufflen,
	loff_t *ppos)
{
	return goldfish_pipe_read_write(filp,
			/* cast away the const */(char __user *)buffer, bufflen,
			/* is_write */ 1);
}

static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
{
	struct goldfish_pipe *pipe = filp->private_data;
	__poll_t mask = 0;
	int status;

	poll_wait(filp, &pipe->wake_queue, wait);

	status = goldfish_cmd(pipe, PIPE_CMD_POLL);
	if (status < 0)
		return -ERESTARTSYS;

	if (status & PIPE_POLL_IN)
		mask |= EPOLLIN | EPOLLRDNORM;
	if (status & PIPE_POLL_OUT)
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (status & PIPE_POLL_HUP)
		mask |= EPOLLHUP;
	if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
		mask |= EPOLLERR;

	return mask;
}

static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
	u32 id, u32 flags)
{
	struct goldfish_pipe *pipe;

	if (WARN_ON(id >= dev->pipes_capacity))
		return;

	pipe = dev->pipes[id];
	if (!pipe)
		return;
	pipe->signalled_flags |= flags;

	if (pipe->prev_signalled || pipe->next_signalled
		|| dev->first_signalled_pipe == pipe)
		return;	/* already in the list */
	pipe->next_signalled = dev->first_signalled_pipe;
	if (dev->first_signalled_pipe)
		dev->first_signalled_pipe->prev_signalled = pipe;
	dev->first_signalled_pipe = pipe;
}

static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev,
	struct goldfish_pipe *pipe)
{
	if (pipe->prev_signalled)
		pipe->prev_signalled->next_signalled = pipe->next_signalled;
	if (pipe->next_signalled)
		pipe->next_signalled->prev_signalled = pipe->prev_signalled;
	if (pipe == dev->first_signalled_pipe)
		dev->first_signalled_pipe = pipe->next_signalled;
	pipe->prev_signalled = NULL;
	pipe->next_signalled = NULL;
}

static struct goldfish_pipe *signalled_pipes_pop_front(
		struct goldfish_pipe_dev *dev, int *wakes)
{
	struct goldfish_pipe *pipe;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	pipe = dev->first_signalled_pipe;
	if (pipe) {
		*wakes = pipe->signalled_flags;
		pipe->signalled_flags = 0;
		/*
		 * This is an optimized version of
		 * signalled_pipes_remove_locked()
		 *  - We want to make it as fast as possible to
		 *    wake the sleeping pipe operations sooner.
		 */
		dev->first_signalled_pipe = pipe->next_signalled;
		if (dev->first_signalled_pipe)
			dev->first_signalled_pipe->prev_signalled = NULL;
		pipe->next_signalled = NULL;
	}

	spin_unlock_irqrestore(&dev->lock, flags);
	return pipe;
}

static void goldfish_interrupt_task(unsigned long unused)
{
	struct goldfish_pipe_dev *dev = pipe_dev;
	/* Iterate over the signalled pipes and wake them one by one */
	struct goldfish_pipe *pipe;
	int wakes;

	while ((pipe = signalled_pipes_pop_front(dev, &wakes)) != NULL) {
		if (wakes & PIPE_WAKE_CLOSED) {
			pipe->flags = 1 << BIT_CLOSED_ON_HOST;
		} else {
			if (wakes & PIPE_WAKE_READ)
				clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
			if (wakes & PIPE_WAKE_WRITE)
				clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);
		}
		/*
		 * wake_up_interruptible() implies a write barrier, so don't
		 * explicitly add another one here.
		 */
		wake_up_interruptible(&pipe->wake_queue);
	}
}

static DECLARE_TASKLET(goldfish_interrupt_tasklet, goldfish_interrupt_task, 0);

/*
 * The general idea of the interrupt handling:
 *
 *  1. device raises an interrupt if there's at least one signalled pipe
 *  2. IRQ handler reads the signalled pipes and their count from the device
 *  3. device writes them into a shared buffer and returns the count
 *     it only resets the IRQ if it has returned all signalled pipes,
 *     otherwise it leaves it raised, so the IRQ handler will be called
 *     again for the next chunk
 *  4. IRQ handler adds all returned pipes to the device's signalled pipes list
 *  5. IRQ handler launches a tasklet to process the signalled pipes from the
 *     list in a separate context
 */
static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
{
	u32 count;
	u32 i;
	unsigned long flags;
	struct goldfish_pipe_dev *dev = dev_id;

	if (dev != pipe_dev)
		return IRQ_NONE;

	/* Request the signalled pipes from the device */
	spin_lock_irqsave(&dev->lock, flags);

	count = readl(dev->base + PIPE_REG_GET_SIGNALLED);
	if (count == 0) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return IRQ_NONE;
	}
	if (count > MAX_SIGNALLED_PIPES)
		count = MAX_SIGNALLED_PIPES;

	for (i = 0; i < count; ++i)
		signalled_pipes_add_locked(dev,
			dev->buffers->signalled_pipe_buffers[i].id,
			dev->buffers->signalled_pipe_buffers[i].flags);

	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&goldfish_interrupt_tasklet);
	return IRQ_HANDLED;
}

static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
{
	int id;

	for (id = 0; id < dev->pipes_capacity; ++id)
		if (!dev->pipes[id])
			return id;

	{
		/* Reallocate the array */
		u32 new_capacity = 2 * dev->pipes_capacity;
		struct goldfish_pipe **pipes =
			kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC);
		if (!pipes)
			return -ENOMEM;
		memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity);
		kfree(dev->pipes);
		dev->pipes = pipes;
		id = dev->pipes_capacity;
		dev->pipes_capacity = new_capacity;
	}
	return id;
}

/**
 * goldfish_pipe_open - open a channel to the AVD
 * @inode: inode of device
 * @file: file struct of opener
 *
 * Create a new pipe link between the emulator and the user application.
 * Each new request produces a new pipe.
 *
 * Note: we use the pipe ID as a mux. All goldfish emulations are 32bit
 * right now so this is fine. A move to 64bit will need to revisit this
 * addressing.
 */
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
	struct goldfish_pipe_dev *dev = pipe_dev;
	unsigned long flags;
	int id;
	int status;

	/* Allocate new pipe kernel object */
	struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);

	if (pipe == NULL)
		return -ENOMEM;

	pipe->dev = dev;
	mutex_init(&pipe->lock);
	init_waitqueue_head(&pipe->wake_queue);

	/*
	 * Command buffer needs to be allocated on its own page to make sure
	 * it is physically contiguous in host's address space.
	 */
	pipe->command_buffer =
		(struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL);
	if (!pipe->command_buffer) {
		status = -ENOMEM;
		goto err_pipe;
	}

	spin_lock_irqsave(&dev->lock, flags);

	id = get_free_pipe_id_locked(dev);
	if (id < 0) {
		status = id;
		goto err_id_locked;
	}

	dev->pipes[id] = pipe;
	pipe->id = id;
	pipe->command_buffer->id = id;

	/* Now tell the emulator we're opening a new pipe. */
	dev->buffers->open_command_params.rw_params_max_count =
			MAX_BUFFERS_PER_COMMAND;
	dev->buffers->open_command_params.command_buffer_ptr =
			(u64)(unsigned long)__pa(pipe->command_buffer);
	status = goldfish_cmd_locked(pipe, PIPE_CMD_OPEN);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (status < 0)
		goto err_cmd;
	/* All is done, save the pipe into the file's private data field */
	file->private_data = pipe;
	return 0;

err_cmd:
	spin_lock_irqsave(&dev->lock, flags);
	dev->pipes[id] = NULL;
err_id_locked:
	spin_unlock_irqrestore(&dev->lock, flags);
	free_page((unsigned long)pipe->command_buffer);
err_pipe:
	kfree(pipe);
	return status;
}

static int goldfish_pipe_release(struct inode *inode, struct file *filp)
{
	unsigned long flags;
	struct goldfish_pipe *pipe = filp->private_data;
	struct goldfish_pipe_dev *dev = pipe->dev;

	/* The guest is closing the channel, so tell the emulator right now */
	(void)goldfish_cmd(pipe, PIPE_CMD_CLOSE);

	spin_lock_irqsave(&dev->lock, flags);
	dev->pipes[pipe->id] = NULL;
	signalled_pipes_remove_locked(dev, pipe);
	spin_unlock_irqrestore(&dev->lock, flags);

	filp->private_data = NULL;
	free_page((unsigned long)pipe->command_buffer);
	kfree(pipe);
	return 0;
}

static const struct file_operations goldfish_pipe_fops = {
	.owner = THIS_MODULE,
	.read = goldfish_pipe_read,
	.write = goldfish_pipe_write,
	.poll = goldfish_pipe_poll,
	.open = goldfish_pipe_open,
	.release = goldfish_pipe_release,
};

static struct miscdevice goldfish_pipe_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "goldfish_pipe",
	.fops = &goldfish_pipe_fops,
};

static int goldfish_pipe_device_init(struct platform_device *pdev)
{
	char *page;
	struct goldfish_pipe_dev *dev = pipe_dev;
	int err = devm_request_irq(&pdev->dev, dev->irq,
				goldfish_pipe_interrupt,
				IRQF_SHARED, "goldfish_pipe", dev);
	if (err) {
		dev_err(&pdev->dev, "unable to allocate IRQ for v2\n");
		return err;
	}

	err = misc_register(&goldfish_pipe_dev);
	if (err) {
		dev_err(&pdev->dev, "unable to register v2 device\n");
		return err;
	}

	dev->first_signalled_pipe = NULL;
	dev->pipes_capacity = INITIAL_PIPES_CAPACITY;
	dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes),
			GFP_KERNEL);
	if (!dev->pipes)
		return -ENOMEM;

	/*
	 * We're going to pass two buffers, open_command_params and
	 * signalled_pipe_buffers, to the host. This means each of those buffers
	 * needs to be contained in a single physical page. The easiest choice
	 * is to just allocate a page and place the buffers in it.
	 */
	if (WARN_ON(sizeof(*dev->buffers) > PAGE_SIZE))
		return -ENOMEM;

	page = (char *)__get_free_page(GFP_KERNEL);
	if (!page) {
		kfree(dev->pipes);
		return -ENOMEM;
	}
	dev->buffers = (struct goldfish_pipe_dev_buffers *)page;

	/* Send the buffer addresses to the host */
	{
		u64 paddr = __pa(&dev->buffers->signalled_pipe_buffers);

		writel((u32)(unsigned long)(paddr >> 32),
			dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);
		writel((u32)(unsigned long)paddr,
			dev->base + PIPE_REG_SIGNAL_BUFFER);
		writel((u32)MAX_SIGNALLED_PIPES,
			dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);

		paddr = __pa(&dev->buffers->open_command_params);
		writel((u32)(unsigned long)(paddr >> 32),
			dev->base + PIPE_REG_OPEN_BUFFER_HIGH);
		writel((u32)(unsigned long)paddr,
			dev->base + PIPE_REG_OPEN_BUFFER);
	}
	return 0;
}

static void goldfish_pipe_device_deinit(struct platform_device *pdev)
{
	struct goldfish_pipe_dev *dev = pipe_dev;

	misc_deregister(&goldfish_pipe_dev);
	kfree(dev->pipes);
	free_page((unsigned long)dev->buffers);
}

static int goldfish_pipe_probe(struct platform_device *pdev)
{
	int err;
	struct resource *r;
	struct goldfish_pipe_dev *dev = pipe_dev;

	if (WARN_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE))
		return -ENOMEM;

	/* not thread safe, but this should not happen */
	WARN_ON(dev->base != NULL);

	spin_lock_init(&dev->lock);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL || resource_size(r) < PAGE_SIZE) {
		dev_err(&pdev->dev, "can't allocate i/o page\n");
		return -EINVAL;
	}
	dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
	if (dev->base == NULL) {
		dev_err(&pdev->dev, "ioremap failed\n");
		return -EINVAL;
	}

	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (r == NULL) {
		err = -EINVAL;
		goto error;
	}
	dev->irq = r->start;

	/*
	 * Exchange the versions with the host device
	 *
	 * Note: v1 driver used to not report its version, so we write it before
	 * reading device version back: this allows the host implementation to
	 * detect the old driver (if there was no version write before read).
	 */
	writel((u32)PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION);
	dev->version = readl(dev->base + PIPE_REG_VERSION);
	if (WARN_ON(dev->version < PIPE_CURRENT_DEVICE_VERSION))
		return -EINVAL;

	err = goldfish_pipe_device_init(pdev);
	if (!err)
		return 0;

error:
	dev->base = NULL;
	return err;
}

static int goldfish_pipe_remove(struct platform_device *pdev)
{
	struct goldfish_pipe_dev *dev = pipe_dev;

	goldfish_pipe_device_deinit(pdev);
	dev->base = NULL;
	return 0;
}

static const struct acpi_device_id goldfish_pipe_acpi_match[] = {
	{ "GFSH0003", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match);

static const struct of_device_id goldfish_pipe_of_match[] = {
	{ .compatible = "google,android-pipe", },
	{},
};
MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);

static struct platform_driver goldfish_pipe_driver = {
	.probe = goldfish_pipe_probe,
	.remove = goldfish_pipe_remove,
	.driver = {
		.name = "goldfish_pipe",
		.of_match_table = goldfish_pipe_of_match,
		.acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match),
	}
};

module_platform_driver(goldfish_pipe_driver);
MODULE_AUTHOR("David Turner <digit@google.com>");
MODULE_LICENSE("GPL");