// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/compat.h>
#include <linux/dma-mapping.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <soc/bcm2835/raspberrypi-firmware.h>

#include "vchiq_core.h"
#include "vchiq_ioctl.h"
#include "vchiq_arm.h"
#include "vchiq_debugfs.h"
#include "vchiq_connected.h"
#include "vchiq_pagelist.h"

#define DEVICE_NAME "vchiq"

#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)

#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)

#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX  1

#define BELL0	0x00
#define BELL2	0x08

#define ARM_DS_ACTIVE	BIT(2)

/* Override the default prefix, which would be vchiq_arm (from the filename) */
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX DEVICE_NAME "."

#define KEEPALIVE_VER 1
#define KEEPALIVE_VER_MIN KEEPALIVE_VER

/* Run time control of log level, based on KERN_XXX level. */
int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_susp_log_level = VCHIQ_LOG_ERROR;

DEFINE_SPINLOCK(msg_queue_spinlock);
struct vchiq_state g_state;

static struct platform_device *bcm2835_camera;
static struct platform_device *bcm2835_audio;

struct vchiq_drvdata {
	const unsigned int cache_line_size;
	struct rpi_firmware *fw;
};

static struct vchiq_drvdata bcm2835_drvdata = {
	.cache_line_size = 32,
};

static struct vchiq_drvdata bcm2836_drvdata = {
	.cache_line_size = 64,
};

struct vchiq_arm_state {
	/* Keepalive-related data */
	struct task_struct *ka_thread;
	struct completion ka_evt;
	atomic_t ka_use_count;
	atomic_t ka_use_ack_count;
	atomic_t ka_release_count;

	rwlock_t susp_res_lock;

	struct vchiq_state *state;

	/*
	 * Global use count for videocore.
	 * This is equal to the sum of the use counts for all services. When
	 * this hits zero the videocore suspend procedure will be initiated.
	 */
	int videocore_use_count;

	/*
	 * Use count to track requests from videocore peer.
	 * This use count is not associated with a service, so needs to be
	 * tracked separately with the state.
	 */
	int peer_use_count;

	/*
	 * Flag to indicate that the first vchiq connect has made it through.
	 * This means that both sides should be fully ready, and we should
	 * be able to suspend after this point.
	 */
	int first_connect;
};

struct vchiq_2835_state {
	int inited;
	struct vchiq_arm_state arm_state;
};

struct vchiq_pagelist_info {
	struct pagelist *pagelist;
	size_t pagelist_buffer_size;
	dma_addr_t dma_addr;
	enum dma_data_direction dma_dir;
	unsigned int num_pages;
	unsigned int pages_need_release;
	struct page **pages;
	struct scatterlist *scatterlist;
	unsigned int scatterlist_mapped;
};

static void __iomem *g_regs;
/* This value is the size of the L2 cache lines as understood by the
 * VPU firmware, which determines the required alignment of the
 * offsets/sizes in pagelists.
 *
 * Modern VPU firmware looks for a DT "cache-line-size" property in
 * the VCHIQ node and will overwrite it with the actual L2 cache size,
 * which the kernel must then respect.  That property was rejected
 * upstream, so we have to use the VPU firmware's compatibility value
 * of 32.
 */
static unsigned int g_cache_line_size = 32;
static unsigned int g_fragments_size;
static char *g_fragments_base;
static char *g_free_fragments;
static struct semaphore g_free_fragments_sema;
static struct device *g_dev;

static DEFINE_SEMAPHORE(g_free_fragments_mutex);

static enum vchiq_status
vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
			     unsigned int size, enum vchiq_bulk_dir dir);

static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id)
{
	struct vchiq_state *state = dev_id;
	irqreturn_t ret = IRQ_NONE;
	unsigned int status;

	/* Read (and clear) the doorbell */
	status = readl(g_regs + BELL0);

	if (status & ARM_DS_ACTIVE) {	/* Was the doorbell rung? */
		remote_event_pollall(state);
		ret = IRQ_HANDLED;
	}

	return ret;
}

static void
cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
{
	if (pagelistinfo->scatterlist_mapped) {
		dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
			     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	}

	if (pagelistinfo->pages_need_release)
		unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);

	dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
			  pagelistinfo->pagelist, pagelistinfo->dma_addr);
}

static inline bool
is_adjacent_block(u32 *addrs, u32 addr, unsigned int k)
{
	u32 tmp;

	if (!k)
		return false;

	tmp = (addrs[k - 1] & PAGE_MASK) +
	      (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT);

	return tmp == (addr & PAGE_MASK);
}

/* There is a potential problem with partial cache lines (pages?)
 * at the ends of the block when reading. If the CPU accessed anything in
 * the same line (page?) then it may have pulled old data into the cache,
 * obscuring the new data underneath. We can solve this by transferring the
 * partial cache lines separately, and allowing the ARM to copy into the
 * cached area.
 */

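/*
 * A note on the encoding used below: each entry in pagelist->addrs
 * describes a run of physically contiguous pages, packed as the bus
 * address of the first page (page-aligned) with (page count - 1) in
 * the low bits.  For example, assuming 4K pages, a three-page block at
 * bus address 0x10000000 would be encoded as 0x10000002.
 * create_pagelist() builds this encoding from the DMA-mapped
 * scatterlist, merging adjacent blocks via is_adjacent_block().
 */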
static struct vchiq_pagelist_info *
create_pagelist(char *buf, char __user *ubuf,
		size_t count, unsigned short type)
{
	struct pagelist *pagelist;
	struct vchiq_pagelist_info *pagelistinfo;
	struct page **pages;
	u32 *addrs;
	unsigned int num_pages, offset, i, k;
	int actual_pages;
	size_t pagelist_size;
	struct scatterlist *scatterlist, *sg;
	int dma_buffers;
	dma_addr_t dma_addr;

	if (count >= INT_MAX - PAGE_SIZE)
		return NULL;

	if (buf)
		offset = (uintptr_t)buf & (PAGE_SIZE - 1);
	else
		offset = (uintptr_t)ubuf & (PAGE_SIZE - 1);
	num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);

	if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) -
				 sizeof(struct vchiq_pagelist_info)) /
				(sizeof(u32) + sizeof(pages[0]) +
				 sizeof(struct scatterlist)))
		return NULL;

	pagelist_size = sizeof(struct pagelist) +
			(num_pages * sizeof(u32)) +
			(num_pages * sizeof(pages[0]) +
			(num_pages * sizeof(struct scatterlist))) +
			sizeof(struct vchiq_pagelist_info);

	/* Allocate enough storage to hold the page pointers and the page
	 * list
	 */
	pagelist = dma_alloc_coherent(g_dev, pagelist_size, &dma_addr,
				      GFP_KERNEL);

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);

	if (!pagelist)
		return NULL;

	addrs = pagelist->addrs;
	pages = (struct page **)(addrs + num_pages);
	scatterlist = (struct scatterlist *)(pages + num_pages);
	pagelistinfo = (struct vchiq_pagelist_info *)
		       (scatterlist + num_pages);

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Populate the fields of the pagelistinfo structure */
	pagelistinfo->pagelist = pagelist;
	pagelistinfo->pagelist_buffer_size = pagelist_size;
	pagelistinfo->dma_addr = dma_addr;
	pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ?
				 DMA_TO_DEVICE : DMA_FROM_DEVICE;
	pagelistinfo->num_pages = num_pages;
	pagelistinfo->pages_need_release = 0;
	pagelistinfo->pages = pages;
	pagelistinfo->scatterlist = scatterlist;
	pagelistinfo->scatterlist_mapped = 0;

	if (buf) {
		unsigned long length = count;
		unsigned int off = offset;

		for (actual_pages = 0; actual_pages < num_pages;
		     actual_pages++) {
			struct page *pg =
				vmalloc_to_page((buf +
						 (actual_pages * PAGE_SIZE)));
			size_t bytes = PAGE_SIZE - off;

			if (!pg) {
				cleanup_pagelistinfo(pagelistinfo);
				return NULL;
			}

			if (bytes > length)
				bytes = length;
			pages[actual_pages] = pg;
			length -= bytes;
			off = 0;
		}
		/* do not try and release vmalloc pages */
	} else {
		actual_pages = pin_user_pages_fast((unsigned long)ubuf & PAGE_MASK, num_pages,
						   type == PAGELIST_READ, pages);

		if (actual_pages != num_pages) {
			vchiq_log_info(vchiq_arm_log_level,
				       "%s - only %d/%d pages locked",
				       __func__, actual_pages, num_pages);

			/* This is probably due to the process being killed */
			if (actual_pages > 0)
				unpin_user_pages(pages, actual_pages);
			cleanup_pagelistinfo(pagelistinfo);
			return NULL;
		}
		/* release user pages */
		pagelistinfo->pages_need_release = 1;
	}

	/*
	 * Initialize the scatterlist so that the magic cookie
	 * is filled if debugging is enabled
	 */
	sg_init_table(scatterlist, num_pages);
	/* Now set the pages for each scatterlist */
	for (i = 0; i < num_pages; i++) {
		unsigned int len = PAGE_SIZE - offset;

		if (len > count)
			len = count;
		sg_set_page(scatterlist + i, pages[i], len, offset);
		offset = 0;
		count -= len;
	}

	dma_buffers = dma_map_sg(g_dev,
				 scatterlist,
				 num_pages,
				 pagelistinfo->dma_dir);

	if (dma_buffers == 0) {
		cleanup_pagelistinfo(pagelistinfo);
		return NULL;
	}

	pagelistinfo->scatterlist_mapped = 1;

	/* Combine adjacent blocks for performance */
	k = 0;
	for_each_sg(scatterlist, sg, dma_buffers, i) {
		u32 len = sg_dma_len(sg);
		u32 addr = sg_dma_address(sg);

		/* Note: addrs is the address + page_count - 1
		 * The firmware expects blocks after the first to be page-
		 * aligned and a multiple of the page size
		 */
		WARN_ON(len == 0);
		WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
		WARN_ON(i && (addr & ~PAGE_MASK));
		if (is_adjacent_block(addrs, addr, k))
			addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
		else
			addrs[k++] = (addr & PAGE_MASK) |
				(((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
	}

	/* Partial cache lines (fragments) require special measures */
	if ((type == PAGELIST_READ) &&
	    ((pagelist->offset & (g_cache_line_size - 1)) ||
	     ((pagelist->offset + pagelist->length) &
	      (g_cache_line_size - 1)))) {
		char *fragments;

		if (down_interruptible(&g_free_fragments_sema)) {
			cleanup_pagelistinfo(pagelistinfo);
			return NULL;
		}

		WARN_ON(!g_free_fragments);

		down(&g_free_fragments_mutex);
		fragments = g_free_fragments;
		WARN_ON(!fragments);
		g_free_fragments = *(char **)g_free_fragments;
		up(&g_free_fragments_mutex);
		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
			(fragments - g_fragments_base) / g_fragments_size;
	}

	return pagelistinfo;
}

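/*
 * For reads that are not cache-line aligned at either end, the VPU
 * writes the unaligned head and tail bytes into the fragment buffer
 * selected above (encoded in pagelist->type) rather than into the
 * caller's pages.  free_pagelist() copies those bytes back with
 * memcpy_to_page() after the scatterlist has been unmapped, so the
 * CPU and VPU never contend for the same cache line.
 */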
static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
	      int actual)
{
	struct pagelist *pagelist = pagelistinfo->pagelist;
	struct page **pages = pagelistinfo->pages;
	unsigned int num_pages = pagelistinfo->num_pages;

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK, %d",
			__func__, pagelistinfo->pagelist, actual);

	/*
	 * NOTE: dma_unmap_sg must be called before the
	 * cpu can touch any of the data/pages.
	 */
	dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
		     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	pagelistinfo->scatterlist_mapped = 0;

	/* Deal with any partial cache lines (fragments) */
	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
		char *fragments = g_fragments_base +
			(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
			g_fragments_size;
		int head_bytes, tail_bytes;

		head_bytes = (g_cache_line_size - pagelist->offset) &
			(g_cache_line_size - 1);
		tail_bytes = (pagelist->offset + actual) &
			(g_cache_line_size - 1);

		if ((actual >= 0) && (head_bytes != 0)) {
			if (head_bytes > actual)
				head_bytes = actual;

			memcpy_to_page(pages[0],
				       pagelist->offset,
				       fragments,
				       head_bytes);
		}
		if ((actual >= 0) && (head_bytes < actual) &&
		    (tail_bytes != 0))
			memcpy_to_page(pages[num_pages - 1],
				       (pagelist->offset + actual) &
				       (PAGE_SIZE - 1) & ~(g_cache_line_size - 1),
				       fragments + g_cache_line_size,
				       tail_bytes);

		down(&g_free_fragments_mutex);
		*(char **)fragments = g_free_fragments;
		g_free_fragments = fragments;
		up(&g_free_fragments_mutex);
		up(&g_free_fragments_sema);
	}

	/* Need to mark all the pages dirty. */
	if (pagelist->type != PAGELIST_WRITE &&
	    pagelistinfo->pages_need_release) {
		unsigned int i;

		for (i = 0; i < num_pages; i++)
			set_page_dirty(pages[i]);
	}

	cleanup_pagelistinfo(pagelistinfo);
}

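/*
 * Probe-time platform setup.  The message slots and the bulk-transfer
 * fragment buffers are carved out of a single coherent DMA allocation;
 * the bus address of the slots is handed to the VPU through the
 * RPI_FIRMWARE_VCHIQ_INIT mailbox property, and the fragment region is
 * advertised in slot_zero->platform_data.
 */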
int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
{
	struct device *dev = &pdev->dev;
	struct vchiq_drvdata *drvdata = platform_get_drvdata(pdev);
	struct rpi_firmware *fw = drvdata->fw;
	struct vchiq_slot_zero *vchiq_slot_zero;
	void *slot_mem;
	dma_addr_t slot_phys;
	u32 channelbase;
	int slot_mem_size, frag_mem_size;
	int err, irq, i;

	/*
	 * VCHI messages between the CPU and firmware use
	 * 32-bit bus addresses.
	 */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (err < 0)
		return err;

	g_cache_line_size = drvdata->cache_line_size;
	g_fragments_size = 2 * g_cache_line_size;

	/* Allocate space for the channels in coherent memory */
	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);

	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
				       &slot_phys, GFP_KERNEL);
	if (!slot_mem) {
		dev_err(dev, "could not allocate DMA memory\n");
		return -ENOMEM;
	}

	WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
	if (!vchiq_slot_zero)
		return -EINVAL;

	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)slot_phys + slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	g_fragments_base = (char *)slot_mem + slot_mem_size;

	g_free_fragments = g_fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(char **)&g_fragments_base[i * g_fragments_size] =
			&g_fragments_base[(i + 1) * g_fragments_size];
	}
	*(char **)&g_fragments_base[i * g_fragments_size] = NULL;
	sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

	err = vchiq_init_state(state, vchiq_slot_zero);
	if (err)
		return err;

	g_regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(g_regs))
		return PTR_ERR(g_regs);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
			       "VCHIQ doorbell", state);
	if (err) {
		dev_err(dev, "failed to register irq=%d\n", irq);
		return err;
	}

	/* Send the base address of the slots to VideoCore */
	channelbase = slot_phys;
	err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
				    &channelbase, sizeof(channelbase));
	if (err || channelbase) {
		dev_err(dev, "failed to set channelbase\n");
		return err ? : -ENXIO;
	}

	g_dev = dev;
	vchiq_log_info(vchiq_arm_log_level, "vchiq_init - done (slots %pK, phys %pad)",
		       vchiq_slot_zero, &slot_phys);

	vchiq_call_connected_callbacks();

	return 0;
}

static void
vchiq_arm_init_state(struct vchiq_state *state,
		     struct vchiq_arm_state *arm_state)
{
	if (arm_state) {
		rwlock_init(&arm_state->susp_res_lock);

		init_completion(&arm_state->ka_evt);
		atomic_set(&arm_state->ka_use_count, 0);
		atomic_set(&arm_state->ka_use_ack_count, 0);
		atomic_set(&arm_state->ka_release_count, 0);

		arm_state->state = state;
		arm_state->first_connect = 0;
	}
}

int
vchiq_platform_init_state(struct vchiq_state *state)
{
	struct vchiq_2835_state *platform_state;

	state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
	if (!state->platform_state)
		return -ENOMEM;

	platform_state = (struct vchiq_2835_state *)state->platform_state;

	platform_state->inited = 1;
	vchiq_arm_init_state(state, &platform_state->arm_state);

	return 0;
}

static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
{
	struct vchiq_2835_state *platform_state;

	platform_state = (struct vchiq_2835_state *)state->platform_state;

	WARN_ON_ONCE(!platform_state->inited);

	return &platform_state->arm_state;
}

void
remote_event_signal(struct remote_event *event)
{
	wmb();

	event->fired = 1;

	dsb(sy);	/* data barrier operation */

	if (event->armed)
		writel(0, g_regs + BELL2); /* trigger vc interrupt */
}

int
vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset,
			void __user *uoffset, int size, int dir)
{
	struct vchiq_pagelist_info *pagelistinfo;

	pagelistinfo = create_pagelist(offset, uoffset, size,
				       (dir == VCHIQ_BULK_RECEIVE)
				       ? PAGELIST_READ
				       : PAGELIST_WRITE);

	if (!pagelistinfo)
		return -ENOMEM;

	bulk->data = pagelistinfo->dma_addr;

	/*
	 * Store the pagelistinfo address in remote_data,
	 * which isn't used by the slave.
	 */
	bulk->remote_data = pagelistinfo;

	return 0;
}

void
vchiq_complete_bulk(struct vchiq_bulk *bulk)
{
	if (bulk && bulk->remote_data && bulk->actual)
		free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data,
			      bulk->actual);
}

int vchiq_dump_platform_state(void *dump_context)
{
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf), "  Platform: 2835 (VC master)");
	return vchiq_dump(dump_context, buf, len + 1);
}

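/*
 * Kernel-side clients typically bring the interface up with the
 * sequence below (a minimal sketch; error handling omitted) and tear
 * it down again with vchiq_shutdown():
 *
 *	vchiq_initialise(&instance);
 *	vchiq_connect(instance);
 *	vchiq_open_service(instance, &params, &handle);
 *
 * vchiq_keepalive_thread_func() further down follows the same pattern.
 */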
#define VCHIQ_INIT_RETRIES 10
int vchiq_initialise(struct vchiq_instance **instance_out)
{
	struct vchiq_state *state;
	struct vchiq_instance *instance = NULL;
	int i, ret;

	/*
	 * VideoCore may not be ready due to boot up timing.
	 * It may never be ready if kernel and firmware are mismatched, so
	 * don't block forever.
	 */
	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
		state = vchiq_get_state();
		if (state)
			break;
		usleep_range(500, 600);
	}
	if (i == VCHIQ_INIT_RETRIES) {
		vchiq_log_error(vchiq_core_log_level, "%s: videocore not initialized\n", __func__);
		ret = -ENOTCONN;
		goto failed;
	} else if (i > 0) {
		vchiq_log_warning(vchiq_core_log_level,
				  "%s: videocore initialized after %d retries\n", __func__, i);
	}

	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance) {
		vchiq_log_error(vchiq_core_log_level,
				"%s: error allocating vchiq instance\n", __func__);
		ret = -ENOMEM;
		goto failed;
	}

	instance->connected = 0;
	instance->state = state;
	mutex_init(&instance->bulk_waiter_list_mutex);
	INIT_LIST_HEAD(&instance->bulk_waiter_list);

	*instance_out = instance;

	ret = 0;

failed:
	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, ret);

	return ret;
}
EXPORT_SYMBOL(vchiq_initialise);

void free_bulk_waiter(struct vchiq_instance *instance)
{
	struct bulk_waiter_node *waiter, *next;

	list_for_each_entry_safe(waiter, next,
				 &instance->bulk_waiter_list, list) {
		list_del(&waiter->list);
		vchiq_log_info(vchiq_arm_log_level, "bulk_waiter - cleaned up %pK for pid %d",
			       waiter, waiter->pid);
		kfree(waiter);
	}
}

enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance)
{
	enum vchiq_status status = VCHIQ_SUCCESS;
	struct vchiq_state *state = instance->state;

	if (mutex_lock_killable(&state->mutex))
		return VCHIQ_RETRY;

	/* Remove all services */
	vchiq_shutdown_internal(state, instance);

	mutex_unlock(&state->mutex);

	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);

	free_bulk_waiter(instance);
	kfree(instance);

	return status;
}
EXPORT_SYMBOL(vchiq_shutdown);

static int vchiq_is_connected(struct vchiq_instance *instance)
{
	return instance->connected;
}

enum vchiq_status vchiq_connect(struct vchiq_instance *instance)
{
	enum vchiq_status status;
	struct vchiq_state *state = instance->state;

	if (mutex_lock_killable(&state->mutex)) {
		vchiq_log_trace(vchiq_core_log_level, "%s: call to mutex_lock failed", __func__);
		status = VCHIQ_RETRY;
		goto failed;
	}
	status = vchiq_connect_internal(state, instance);

	if (status == VCHIQ_SUCCESS)
		instance->connected = 1;

	mutex_unlock(&state->mutex);

failed:
	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_connect);

static enum vchiq_status
vchiq_add_service(struct vchiq_instance *instance,
		  const struct vchiq_service_params_kernel *params,
		  unsigned int *phandle)
{
	enum vchiq_status status;
	struct vchiq_state *state = instance->state;
	struct vchiq_service *service = NULL;
	int srvstate;

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	srvstate = vchiq_is_connected(instance)
		   ? VCHIQ_SRVSTATE_LISTENING
		   : VCHIQ_SRVSTATE_HIDDEN;

	service = vchiq_add_service_internal(state, params, srvstate, instance, NULL);

	if (service) {
		*phandle = service->handle;
		status = VCHIQ_SUCCESS;
	} else {
		status = VCHIQ_ERROR;
	}

	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);

	return status;
}

enum vchiq_status
vchiq_open_service(struct vchiq_instance *instance,
		   const struct vchiq_service_params_kernel *params,
		   unsigned int *phandle)
{
	enum vchiq_status status = VCHIQ_ERROR;
	struct vchiq_state *state = instance->state;
	struct vchiq_service *service = NULL;

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	if (!vchiq_is_connected(instance))
		goto failed;

	service = vchiq_add_service_internal(state, params, VCHIQ_SRVSTATE_OPENING, instance, NULL);

	if (service) {
		*phandle = service->handle;
		status = vchiq_open_service_internal(service, current->pid);
		if (status != VCHIQ_SUCCESS) {
			vchiq_remove_service(service->handle);
			*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
		}
	}

failed:
	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_open_service);

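/*
 * The bulk transmit/receive wrappers below retry automatically on
 * VCHIQ_RETRY, sleeping 1 ms between attempts: callers expect these
 * calls to block until the transfer has at least been queued, so a
 * transient shortage of resources must not surface as an error.
 */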
enum vchiq_status
vchiq_bulk_transmit(unsigned int handle, const void *data, unsigned int size,
		    void *userdata, enum vchiq_bulk_mode mode)
{
	enum vchiq_status status;

	while (1) {
		switch (mode) {
		case VCHIQ_BULK_MODE_NOCALLBACK:
		case VCHIQ_BULK_MODE_CALLBACK:
			status = vchiq_bulk_transfer(handle,
						     (void *)data, NULL,
						     size, userdata, mode,
						     VCHIQ_BULK_TRANSMIT);
			break;
		case VCHIQ_BULK_MODE_BLOCKING:
			status = vchiq_blocking_bulk_transfer(handle, (void *)data, size,
							      VCHIQ_BULK_TRANSMIT);
			break;
		default:
			return VCHIQ_ERROR;
		}

		/*
		 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
		 * to implement a retry mechanism since this function is
		 * supposed to block until queued
		 */
		if (status != VCHIQ_RETRY)
			break;

		msleep(1);
	}

	return status;
}
EXPORT_SYMBOL(vchiq_bulk_transmit);

enum vchiq_status vchiq_bulk_receive(unsigned int handle, void *data,
				     unsigned int size, void *userdata,
				     enum vchiq_bulk_mode mode)
{
	enum vchiq_status status;

	while (1) {
		switch (mode) {
		case VCHIQ_BULK_MODE_NOCALLBACK:
		case VCHIQ_BULK_MODE_CALLBACK:
			status = vchiq_bulk_transfer(handle, data, NULL,
						     size, userdata,
						     mode, VCHIQ_BULK_RECEIVE);
			break;
		case VCHIQ_BULK_MODE_BLOCKING:
			status = vchiq_blocking_bulk_transfer(handle, (void *)data, size,
							      VCHIQ_BULK_RECEIVE);
			break;
		default:
			return VCHIQ_ERROR;
		}

		/*
		 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
		 * to implement a retry mechanism since this function is
		 * supposed to block until queued
		 */
		if (status != VCHIQ_RETRY)
			break;

		msleep(1);
	}

	return status;
}
EXPORT_SYMBOL(vchiq_bulk_receive);

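/*
 * Blocking bulk transfers keep their bulk_waiter on a per-instance
 * list keyed by pid, so that a transfer interrupted by a signal can be
 * picked up again by the same thread: a retry with matching data and
 * size reuses the saved waiter instead of queueing a new transfer.
 */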
static enum vchiq_status
vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size,
			     enum vchiq_bulk_dir dir)
{
	struct vchiq_instance *instance;
	struct vchiq_service *service;
	enum vchiq_status status;
	struct bulk_waiter_node *waiter = NULL, *iter;

	service = find_service_by_handle(handle);
	if (!service)
		return VCHIQ_ERROR;

	instance = service->instance;

	vchiq_service_put(service);

	mutex_lock(&instance->bulk_waiter_list_mutex);
	list_for_each_entry(iter, &instance->bulk_waiter_list, list) {
		if (iter->pid == current->pid) {
			list_del(&iter->list);
			waiter = iter;
			break;
		}
	}
	mutex_unlock(&instance->bulk_waiter_list_mutex);

	if (waiter) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* This thread has an outstanding bulk transfer. */
			/* FIXME: why compare a dma address to a pointer? */
			if ((bulk->data != (dma_addr_t)(uintptr_t)data) || (bulk->size != size)) {
				/*
				 * This is not a retry of the previous one.
				 * Cancel the signal when the transfer completes.
				 */
				spin_lock(&bulk_waiter_spinlock);
				bulk->userdata = NULL;
				spin_unlock(&bulk_waiter_spinlock);
			}
		}
	} else {
		waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
		if (!waiter) {
			vchiq_log_error(vchiq_core_log_level, "%s - out of memory", __func__);
			return VCHIQ_ERROR;
		}
	}

	status = vchiq_bulk_transfer(handle, data, NULL, size,
				     &waiter->bulk_waiter,
				     VCHIQ_BULK_MODE_BLOCKING, dir);
	if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* Cancel the signal when the transfer completes. */
			spin_lock(&bulk_waiter_spinlock);
			bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		kfree(waiter);
	} else {
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		vchiq_log_info(vchiq_arm_log_level, "saved bulk_waiter %pK for pid %d", waiter,
			       current->pid);
	}

	return status;
}

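/*
 * Completions are passed to user space through a fixed-size ring.
 * completion_insert and completion_remove are free-running indices
 * masked with (MAX_COMPLETIONS - 1) on access, so the ring is full
 * exactly when insert - remove == MAX_COMPLETIONS.
 */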
static enum vchiq_status
add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
	       struct vchiq_header *header, struct user_service *user_service,
	       void *bulk_userdata)
{
	struct vchiq_completion_data_kernel *completion;
	int insert;

	DEBUG_INITIALISE(g_state.local);

	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level, "%s - completion queue full", __func__);
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (wait_for_completion_interruptible(&instance->remove_event)) {
			vchiq_log_info(vchiq_arm_log_level, "service_callback interrupted");
			return VCHIQ_RETRY;
		} else if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level, "service_callback closing");
			return VCHIQ_SUCCESS;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/*
		 * Take an extra reference, to be held until
		 * this CLOSED notification is delivered.
		 */
		vchiq_service_get(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/*
	 * A write barrier is needed here to ensure that the entire completion
	 * record is written out before the insert point.
	 */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	complete(&instance->insert_event);

	return VCHIQ_SUCCESS;
}

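/*
 * For VCHI services the message header is queued on the per-service
 * msg_queue and the completion ring carries at most one outstanding
 * MESSAGE_AVAILABLE marker; if a DEQUEUE_MESSAGE ioctl is already
 * waiting (dequeue_pending), the completion ring is bypassed entirely
 * and the waiter is woken directly via insert_event.
 */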
enum vchiq_status
service_callback(enum vchiq_reason reason, struct vchiq_header *header,
		 unsigned int handle, void *bulk_userdata)
{
	/*
	 * How do we ensure the callback goes to the right client?
	 * The service_user data points to a user_service record
	 * containing the original callback and the user state structure, which
	 * contains a circular buffer for completion records.
	 */
	struct user_service *user_service;
	struct vchiq_service *service;
	struct vchiq_instance *instance;
	bool skip_completion = false;

	DEBUG_INITIALISE(g_state.local);

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	rcu_read_lock();
	service = handle_to_service(handle);
	if (WARN_ON(!service)) {
		rcu_read_unlock();
		return VCHIQ_SUCCESS;
	}

	user_service = (struct user_service *)service->base.userdata;
	instance = user_service->instance;

	if (!instance || instance->closing) {
		rcu_read_unlock();
		return VCHIQ_SUCCESS;
	}

	/*
	 * Since we hop between different synchronization mechanisms here,
	 * taking an extra reference keeps the implementation simpler.
	 */
	vchiq_service_get(service);
	rcu_read_unlock();

	vchiq_log_trace(vchiq_arm_log_level,
			"%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
			__func__, (unsigned long)user_service, service->localport,
			user_service->userdata, reason, (unsigned long)header,
			(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level, "%s - msg queue full", __func__);
			/*
			 * If there is no MESSAGE_AVAILABLE in the completion
			 * queue, add one
			 */
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				enum vchiq_status status;

				vchiq_log_info(vchiq_arm_log_level,
					       "Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason, NULL, user_service,
							bulk_userdata);
				if (status != VCHIQ_SUCCESS) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					vchiq_service_put(service);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			if (wait_for_completion_interruptible(&user_service->remove_event)) {
				vchiq_log_info(vchiq_arm_log_level, "%s interrupted", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				vchiq_service_put(service);
				return VCHIQ_RETRY;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level, "%s closing", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				vchiq_service_put(service);
				return VCHIQ_ERROR;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/*
		 * If there is a thread waiting in DEQUEUE_MESSAGE, or if
		 * there is a MESSAGE_AVAILABLE in the completion queue then
		 * bypass the completion queue.
		 */
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
		    user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = true;
		}

		spin_unlock(&msg_queue_spinlock);
		complete(&user_service->insert_event);

		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	vchiq_service_put(service);

	if (skip_completion)
		return VCHIQ_SUCCESS;

	return add_completion(instance, reason, header, user_service,
			      bulk_userdata);
}

int vchiq_dump(void *dump_context, const char *str, int len)
{
	struct dump_context *context = (struct dump_context *)dump_context;
	int copy_bytes;

	if (context->actual >= context->space)
		return 0;

	if (context->offset > 0) {
		int skip_bytes = min_t(int, len, context->offset);

		str += skip_bytes;
		len -= skip_bytes;
		context->offset -= skip_bytes;
		if (context->offset > 0)
			return 0;
	}
	copy_bytes = min_t(int, len, context->space - context->actual);
	if (copy_bytes == 0)
		return 0;
	if (copy_to_user(context->buf + context->actual, str,
			 copy_bytes))
		return -EFAULT;
	context->actual += copy_bytes;
	len -= copy_bytes;

	/*
	 * If the terminating NUL is included in the length, then it
	 * marks the end of a line and should be replaced with a
	 * newline character.
	 */
	if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
		char cr = '\n';

		if (copy_to_user(context->buf + context->actual - 1,
				 &cr, 1))
			return -EFAULT;
	}
	return 0;
}

int vchiq_dump_platform_instances(void *dump_context)
{
	struct vchiq_state *state = vchiq_get_state();
	char buf[80];
	int len;
	int i;

	if (!state)
		return -ENOTCONN;

	/*
	 * There is no list of instances, so instead scan all services,
	 * marking those that have been dumped.
	 */

	rcu_read_lock();
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;

		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback)
			continue;

		instance = service->instance;
		if (instance)
			instance->mark = 0;
	}
	rcu_read_unlock();

	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;
		int err;

		rcu_read_lock();
		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback) {
			rcu_read_unlock();
			continue;
		}

		instance = service->instance;
		if (!instance || instance->mark) {
			rcu_read_unlock();
			continue;
		}
		rcu_read_unlock();

		len = snprintf(buf, sizeof(buf),
			       "Instance %pK: pid %d,%s completions %d/%d",
			       instance, instance->pid,
			       instance->connected ? " connected, " : "",
			       instance->completion_insert -
			       instance->completion_remove,
			       MAX_COMPLETIONS);
		err = vchiq_dump(dump_context, buf, len + 1);
		if (err)
			return err;
		instance->mark = 1;
	}
	return 0;
}

int vchiq_dump_platform_service_state(void *dump_context,
				      struct vchiq_service *service)
{
	struct user_service *user_service =
			(struct user_service *)service->base.userdata;
	char buf[80];
	int len;

	len = scnprintf(buf, sizeof(buf), "  instance %pK", service->instance);

	if ((service->base.callback == service_callback) && user_service->is_vchi) {
		len += scnprintf(buf + len, sizeof(buf) - len, ", %d/%d messages",
				 user_service->msg_insert - user_service->msg_remove,
				 MSG_QUEUE_SIZE);

		if (user_service->dequeue_pending)
			len += scnprintf(buf + len, sizeof(buf) - len,
					 " (dequeue pending)");
	}

	return vchiq_dump(dump_context, buf, len + 1);
}

struct vchiq_state *
vchiq_get_state(void)
{
	if (!g_state.remote) {
		pr_err("%s: g_state.remote == NULL\n", __func__);
		return NULL;
	}

	if (g_state.remote->initialised != 1) {
		pr_notice("%s: g_state.remote->initialised != 1 (%d)\n",
			  __func__, g_state.remote->initialised);
		return NULL;
	}

	return &g_state;
}

/*
 * Autosuspend related functionality
 */

static enum vchiq_status
vchiq_keepalive_vchiq_callback(enum vchiq_reason reason,
			       struct vchiq_header *header,
			       unsigned int service_user, void *bulk_user)
{
	vchiq_log_error(vchiq_susp_log_level, "%s callback reason %d", __func__, reason);
	return 0;
}

/*
 * The keepalive thread proxies use/release requests from the VPU:
 * vchiq_on_remote_use() and vchiq_on_remote_release() bump atomic
 * counters and complete ka_evt, and this thread then replays them as
 * vchiq_use_service()/vchiq_release_service() calls on a dedicated
 * "KEEP" service from process context.
 */
static int
vchiq_keepalive_thread_func(void *v)
{
	struct vchiq_state *state = (struct vchiq_state *)v;
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	enum vchiq_status status;
	struct vchiq_instance *instance;
	unsigned int ka_handle;
	int ret;

	struct vchiq_service_params_kernel params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
		.callback    = vchiq_keepalive_vchiq_callback,
		.version     = KEEPALIVE_VER,
		.version_min = KEEPALIVE_VER_MIN
	};

	ret = vchiq_initialise(&instance);
	if (ret) {
		vchiq_log_error(vchiq_susp_log_level, "%s vchiq_initialise failed %d", __func__,
				ret);
		goto exit;
	}

	status = vchiq_connect(instance);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level, "%s vchiq_connect failed %d", __func__,
				status);
		goto shutdown;
	}

	status = vchiq_add_service(instance, &params, &ka_handle);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level, "%s vchiq_open_service failed %d", __func__,
				status);
		goto shutdown;
	}

	while (1) {
		long rc = 0, uc = 0;

		if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
			vchiq_log_error(vchiq_susp_log_level, "%s interrupted", __func__);
			flush_signals(current);
			continue;
		}

		/*
		 * read and clear counters. Do release_count then use_count to
		 * prevent getting more releases than uses
		 */
		rc = atomic_xchg(&arm_state->ka_release_count, 0);
		uc = atomic_xchg(&arm_state->ka_use_count, 0);

		/*
		 * Call use/release service the requisite number of times.
		 * Process use before release so use counts don't go negative
		 */
		while (uc--) {
			atomic_inc(&arm_state->ka_use_ack_count);
			status = vchiq_use_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
						"%s vchiq_use_service error %d", __func__, status);
			}
		}
		while (rc--) {
			status = vchiq_release_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
						"%s vchiq_release_service error %d", __func__,
						status);
			}
		}
	}

shutdown:
	vchiq_shutdown(instance);
exit:
	return 0;
}

int
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
		   enum USE_TYPE_E use_type)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	int ret = 0;
	char entity[16];
	int *entity_uc;
	int local_uc;

	if (!arm_state) {
		ret = -EINVAL;
		goto out;
	}

	if (use_type == USE_TYPE_VCHIQ) {
		sprintf(entity, "VCHIQ: ");
		entity_uc = &arm_state->peer_use_count;
	} else if (service) {
		sprintf(entity, "%c%c%c%c:%03d",
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id);
		entity_uc = &service->service_use_count;
	} else {
		vchiq_log_error(vchiq_susp_log_level, "%s null service ptr", __func__);
		ret = -EINVAL;
		goto out;
	}

	write_lock_bh(&arm_state->susp_res_lock);
	local_uc = ++arm_state->videocore_use_count;
	++(*entity_uc);

	vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
			*entity_uc, local_uc);

	write_unlock_bh(&arm_state->susp_res_lock);

	if (!ret) {
		enum vchiq_status status = VCHIQ_SUCCESS;
		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);

		while (ack_cnt && (status == VCHIQ_SUCCESS)) {
			/* Send the use notify to videocore */
			status = vchiq_send_remote_use_active(state);
			if (status == VCHIQ_SUCCESS)
				ack_cnt--;
			else
				atomic_add(ack_cnt, &arm_state->ka_use_ack_count);
		}
	}

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}

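/*
 * vchiq_release_internal() is the inverse of vchiq_use_internal():
 * both the per-entity count and the global videocore_use_count drop
 * under susp_res_lock, and an underflow is treated as a caller bug
 * with WARN_ON rather than BUG_ON so that a misbehaving user thread
 * cannot bring down the kernel.
 */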
int
vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	int ret = 0;
	char entity[16];
	int *entity_uc;

	if (!arm_state) {
		ret = -EINVAL;
		goto out;
	}

	if (service) {
		sprintf(entity, "%c%c%c%c:%03d",
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id);
		entity_uc = &service->service_use_count;
	} else {
		sprintf(entity, "PEER: ");
		entity_uc = &arm_state->peer_use_count;
	}

	write_lock_bh(&arm_state->susp_res_lock);
	if (!arm_state->videocore_use_count || !(*entity_uc)) {
		/* Don't use BUG_ON - don't allow user thread to crash kernel */
		WARN_ON(!arm_state->videocore_use_count);
		WARN_ON(!(*entity_uc));
		ret = -EINVAL;
		goto unlock;
	}
	--arm_state->videocore_use_count;
	--(*entity_uc);

	vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
			*entity_uc, arm_state->videocore_use_count);

unlock:
	write_unlock_bh(&arm_state->susp_res_lock);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}

void
vchiq_on_remote_use(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	atomic_inc(&arm_state->ka_use_count);
	complete(&arm_state->ka_evt);
}

void
vchiq_on_remote_release(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	atomic_inc(&arm_state->ka_release_count);
	complete(&arm_state->ka_evt);
}

int
vchiq_use_service_internal(struct vchiq_service *service)
{
	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}

int
vchiq_release_service_internal(struct vchiq_service *service)
{
	return vchiq_release_internal(service->state, service);
}

struct vchiq_debugfs_node *
vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
{
	return &instance->debugfs_node;
}

int
vchiq_instance_get_use_count(struct vchiq_instance *instance)
{
	struct vchiq_service *service;
	int use_count = 0, i;

	i = 0;
	rcu_read_lock();
	while ((service = __next_service_by_instance(instance->state,
						     instance, &i)))
		use_count += service->service_use_count;
	rcu_read_unlock();
	return use_count;
}

int
vchiq_instance_get_pid(struct vchiq_instance *instance)
{
	return instance->pid;
}

int
vchiq_instance_get_trace(struct vchiq_instance *instance)
{
	return instance->trace;
}

void
vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
{
	struct vchiq_service *service;
	int i;

	i = 0;
	rcu_read_lock();
	while ((service = __next_service_by_instance(instance->state,
						     instance, &i)))
		service->trace = trace;
	rcu_read_unlock();
	instance->trace = (trace != 0);
}

enum vchiq_status
vchiq_use_service(unsigned int handle)
{
	enum vchiq_status ret = VCHIQ_ERROR;
	struct vchiq_service *service = find_service_by_handle(handle);

	if (service) {
		ret = vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
		vchiq_service_put(service);
	}
	return ret;
}
EXPORT_SYMBOL(vchiq_use_service);

enum vchiq_status
vchiq_release_service(unsigned int handle)
{
	enum vchiq_status ret = VCHIQ_ERROR;
	struct vchiq_service *service = find_service_by_handle(handle);

	if (service) {
		ret = vchiq_release_internal(service->state, service);
		vchiq_service_put(service);
	}
	return ret;
}
EXPORT_SYMBOL(vchiq_release_service);

struct service_data_struct {
	int fourcc;
	int clientid;
	int use_count;
};

void
vchiq_dump_service_use_state(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct service_data_struct *service_data;
	int i, found = 0;
	/*
	 * If there's more than 64 services, only dump ones with
	 * non-zero counts
	 */
	int only_nonzero = 0;
	static const char *nz = "<-- preventing suspend";

	int peer_count;
	int vc_use_count;
	int active_services;

	if (!arm_state)
		return;

	service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
				     GFP_KERNEL);
	if (!service_data)
		return;

	read_lock_bh(&arm_state->susp_res_lock);
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > MAX_SERVICES)
		only_nonzero = 1;

	rcu_read_lock();
	for (i = 0; i < active_services; i++) {
		struct vchiq_service *service_ptr =
			rcu_dereference(state->services[i]);

		if (!service_ptr)
			continue;

		if (only_nonzero && !service_ptr->service_use_count)
			continue;

		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
			continue;

		service_data[found].fourcc = service_ptr->base.fourcc;
		service_data[found].clientid = service_ptr->client_id;
		service_data[found].use_count = service_ptr->service_use_count;
		found++;
		if (found >= MAX_SERVICES)
			break;
	}
	rcu_read_unlock();

	read_unlock_bh(&arm_state->susp_res_lock);

	if (only_nonzero)
		vchiq_log_warning(vchiq_susp_log_level, "Too many active services (%d). Only dumping up to first %d services with non-zero use-count",
				  active_services, found);

	for (i = 0; i < found; i++) {
		vchiq_log_warning(vchiq_susp_log_level, "----- %c%c%c%c:%d service count %d %s",
				  VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
				  service_data[i].clientid, service_data[i].use_count,
				  service_data[i].use_count ? nz : "");
	}
	vchiq_log_warning(vchiq_susp_log_level, "----- VCHIQ use count %d", peer_count);
	vchiq_log_warning(vchiq_susp_log_level, "--- Overall vchiq instance use count %d",
			  vc_use_count);

	kfree(service_data);
}

enum vchiq_status
vchiq_check_service(struct vchiq_service *service)
{
	struct vchiq_arm_state *arm_state;
	enum vchiq_status ret = VCHIQ_ERROR;

	if (!service || !service->state)
		goto out;

	arm_state = vchiq_platform_get_arm_state(service->state);

	read_lock_bh(&arm_state->susp_res_lock);
	if (service->service_use_count)
		ret = VCHIQ_SUCCESS;
	read_unlock_bh(&arm_state->susp_res_lock);

	if (ret == VCHIQ_ERROR) {
		vchiq_log_error(vchiq_susp_log_level,
				"%s ERROR - %c%c%c%c:%d service count %d, state count %d", __func__,
				VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc), service->client_id,
				service->service_use_count, arm_state->videocore_use_count);
		vchiq_dump_service_use_state(service->state);
	}
out:
	return ret;
}

void vchiq_platform_conn_state_changed(struct vchiq_state *state,
				       enum vchiq_connstate oldstate,
				       enum vchiq_connstate newstate)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	char threadname[16];

	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
		       get_conn_state_name(oldstate), get_conn_state_name(newstate));
	if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
		return;

	write_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->first_connect) {
		write_unlock_bh(&arm_state->susp_res_lock);
		return;
	}

	arm_state->first_connect = 1;
	write_unlock_bh(&arm_state->susp_res_lock);
	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
		 state->id);
	arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
					      (void *)state,
					      threadname);
	if (IS_ERR(arm_state->ka_thread)) {
		vchiq_log_error(vchiq_susp_log_level,
				"vchiq: FATAL: couldn't create thread %s",
				threadname);
	} else {
		wake_up_process(arm_state->ka_thread);
	}
}

static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_drvdata },
	{ .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_drvdata },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);

static struct platform_device *
vchiq_register_child(struct platform_device *pdev, const char *name)
{
	struct platform_device_info pdevinfo;
	struct platform_device *child;

	memset(&pdevinfo, 0, sizeof(pdevinfo));

	pdevinfo.parent = &pdev->dev;
	pdevinfo.name = name;
	pdevinfo.id = PLATFORM_DEVID_NONE;
	pdevinfo.dma_mask = DMA_BIT_MASK(32);

	child = platform_device_register_full(&pdevinfo);
	if (IS_ERR(child)) {
		dev_warn(&pdev->dev, "%s not registered\n", name);
		child = NULL;
	}

	return child;
}

static int vchiq_probe(struct platform_device *pdev)
{
	struct device_node *fw_node;
	const struct of_device_id *of_id;
	struct vchiq_drvdata *drvdata;
	int err;

	of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
	drvdata = (struct vchiq_drvdata *)of_id->data;
	if (!drvdata)
		return -EINVAL;

	fw_node = of_find_compatible_node(NULL, NULL,
					  "raspberrypi,bcm2835-firmware");
	if (!fw_node) {
		dev_err(&pdev->dev, "Missing firmware node\n");
		return -ENOENT;
	}

	drvdata->fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
	of_node_put(fw_node);
	if (!drvdata->fw)
		return -EPROBE_DEFER;

	platform_set_drvdata(pdev, drvdata);

	err = vchiq_platform_init(pdev, &g_state);
	if (err)
		goto failed_platform_init;

	vchiq_debugfs_init();

	vchiq_log_info(vchiq_arm_log_level,
		       "vchiq: platform initialised - version %d (min %d)",
		       VCHIQ_VERSION, VCHIQ_VERSION_MIN);

	/*
	 * Simply exit on error since the function handles cleanup in
	 * cases of failure.
	 */
	err = vchiq_register_chrdev(&pdev->dev);
	if (err) {
		vchiq_log_warning(vchiq_arm_log_level,
				  "Failed to initialize vchiq cdev");
		goto error_exit;
	}

	bcm2835_camera = vchiq_register_child(pdev, "bcm2835-camera");
	bcm2835_audio = vchiq_register_child(pdev, "bcm2835_audio");

	return 0;

failed_platform_init:
	vchiq_log_warning(vchiq_arm_log_level, "could not initialize vchiq platform");
error_exit:
	return err;
}

static int vchiq_remove(struct platform_device *pdev)
{
	platform_device_unregister(bcm2835_audio);
	platform_device_unregister(bcm2835_camera);
	vchiq_debugfs_deinit();
	vchiq_deregister_chrdev();

	return 0;
}

static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove = vchiq_remove,
};

static int __init vchiq_driver_init(void)
{
	int ret;

	ret = platform_driver_register(&vchiq_driver);
	if (ret)
		pr_err("Failed to register vchiq driver\n");

	return ret;
}
module_init(vchiq_driver_init);

static void __exit vchiq_driver_exit(void)
{
	platform_driver_unregister(&vchiq_driver);
}
module_exit(vchiq_driver_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");