// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/compat.h>
#include <linux/dma-mapping.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <soc/bcm2835/raspberrypi-firmware.h>

#include "vchiq_core.h"
#include "vchiq_ioctl.h"
#include "vchiq_arm.h"
#include "vchiq_debugfs.h"
#include "vchiq_connected.h"
#include "vchiq_pagelist.h"

#define DEVICE_NAME "vchiq"

#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)

#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)

#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1

#define BELL0	0x00
#define BELL2	0x08

#define ARM_DS_ACTIVE	BIT(2)

/* Override the default prefix, which would be vchiq_arm (from the filename) */
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX DEVICE_NAME "."

#define KEEPALIVE_VER 1
#define KEEPALIVE_VER_MIN KEEPALIVE_VER

/* Run time control of log level, based on KERN_XXX level. */
int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_susp_log_level = VCHIQ_LOG_ERROR;

DEFINE_SPINLOCK(msg_queue_spinlock);
struct vchiq_state g_state;

static struct platform_device *bcm2835_camera;
static struct platform_device *bcm2835_audio;

struct vchiq_drvdata {
	const unsigned int cache_line_size;
	struct rpi_firmware *fw;
};

static struct vchiq_drvdata bcm2835_drvdata = {
	.cache_line_size = 32,
};

static struct vchiq_drvdata bcm2836_drvdata = {
	.cache_line_size = 64,
};

struct vchiq_arm_state {
	/* Keepalive-related data */
	struct task_struct *ka_thread;
	struct completion ka_evt;
	atomic_t ka_use_count;
	atomic_t ka_use_ack_count;
	atomic_t ka_release_count;

	rwlock_t susp_res_lock;

	struct vchiq_state *state;

	/*
	 * Global use count for videocore.
	 * This is equal to the sum of the use counts for all services. When
	 * this hits zero the videocore suspend procedure will be initiated.
	 */
	int videocore_use_count;

	/*
	 * Use count to track requests from videocore peer.
	 * This use count is not associated with a service, so needs to be
	 * tracked separately with the state.
	 */
	int peer_use_count;

	/*
	 * Flag to indicate that the first vchiq connect has made it through.
	 * This means that both sides should be fully ready, and we should
	 * be able to suspend after this point.
	 */
	int first_connect;
};

struct vchiq_2835_state {
	int inited;
	struct vchiq_arm_state arm_state;
};

struct vchiq_pagelist_info {
	struct pagelist *pagelist;
	size_t pagelist_buffer_size;
	dma_addr_t dma_addr;
	enum dma_data_direction dma_dir;
	unsigned int num_pages;
	unsigned int pages_need_release;
	struct page **pages;
	struct scatterlist *scatterlist;
	unsigned int scatterlist_mapped;
};

static void __iomem *g_regs;
/* This value is the size of the L2 cache lines as understood by the
 * VPU firmware, which determines the required alignment of the
 * offsets/sizes in pagelists.
 *
 * Modern VPU firmware looks for a DT "cache-line-size" property in
 * the VCHIQ node and will overwrite it with the actual L2 cache size,
 * which the kernel must then respect. That property was rejected
 * upstream, so we have to use the VPU firmware's compatibility value
 * of 32.
 */
static unsigned int g_cache_line_size = 32;
static unsigned int g_fragments_size;
static char *g_fragments_base;
static char *g_free_fragments;
static struct semaphore g_free_fragments_sema;

static DEFINE_SEMAPHORE(g_free_fragments_mutex, 1);

static int
vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data,
			     unsigned int size, enum vchiq_bulk_dir dir);

static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id)
{
	struct vchiq_state *state = dev_id;
	irqreturn_t ret = IRQ_NONE;
	unsigned int status;

	/* Read (and clear) the doorbell */
	status = readl(g_regs + BELL0);

	if (status & ARM_DS_ACTIVE) {	/* Was the doorbell rung? */
		remote_event_pollall(state);
		ret = IRQ_HANDLED;
	}

	return ret;
}

static void
cleanup_pagelistinfo(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo)
{
	if (pagelistinfo->scatterlist_mapped) {
		dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
			     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	}

	if (pagelistinfo->pages_need_release)
		unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);

	dma_free_coherent(instance->state->dev, pagelistinfo->pagelist_buffer_size,
			  pagelistinfo->pagelist, pagelistinfo->dma_addr);
}

static inline bool
is_adjacent_block(u32 *addrs, u32 addr, unsigned int k)
{
	u32 tmp;

	if (!k)
		return false;

	tmp = (addrs[k - 1] & PAGE_MASK) +
	      (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT);

	return tmp == (addr & PAGE_MASK);
}

/* There is a potential problem with partial cache lines (pages?)
 * at the ends of the block when reading. If the CPU accessed anything in
 * the same line (page?) then it may have pulled old data into the cache,
 * obscuring the new data underneath. We can solve this by transferring the
 * partial cache lines separately, and allowing the ARM to copy into the
 * cached area.
 */

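/*
 * Build a pagelist describing "count" bytes at "buf" (kernel/vmalloc memory)
 * or "ubuf" (userspace memory) so that the VPU can access them directly.
 * The pagelist, the page pointer array, the scatterlist and the bookkeeping
 * structure below are carved out of a single coherent DMA allocation; user
 * pages are pinned, the scatterlist is mapped for DMA, adjacent blocks are
 * merged, and reads that are not cache-line aligned additionally claim a
 * fragment buffer so the partial lines can be copied back by the CPU.
 */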
static struct vchiq_pagelist_info *
create_pagelist(struct vchiq_instance *instance, char *buf, char __user *ubuf,
		size_t count, unsigned short type)
{
	struct pagelist *pagelist;
	struct vchiq_pagelist_info *pagelistinfo;
	struct page **pages;
	u32 *addrs;
	unsigned int num_pages, offset, i, k;
	int actual_pages;
	size_t pagelist_size;
	struct scatterlist *scatterlist, *sg;
	int dma_buffers;
	dma_addr_t dma_addr;

	if (count >= INT_MAX - PAGE_SIZE)
		return NULL;

	if (buf)
		offset = (uintptr_t)buf & (PAGE_SIZE - 1);
	else
		offset = (uintptr_t)ubuf & (PAGE_SIZE - 1);
	num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);

	if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) -
				 sizeof(struct vchiq_pagelist_info)) /
				(sizeof(u32) + sizeof(pages[0]) +
				 sizeof(struct scatterlist)))
		return NULL;

	pagelist_size = sizeof(struct pagelist) +
			(num_pages * sizeof(u32)) +
			(num_pages * sizeof(pages[0]) +
			(num_pages * sizeof(struct scatterlist))) +
			sizeof(struct vchiq_pagelist_info);

	/* Allocate enough storage to hold the page pointers and the page
	 * list
	 */
	pagelist = dma_alloc_coherent(instance->state->dev, pagelist_size, &dma_addr,
				      GFP_KERNEL);

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);

	if (!pagelist)
		return NULL;

	addrs		= pagelist->addrs;
	pages		= (struct page **)(addrs + num_pages);
	scatterlist	= (struct scatterlist *)(pages + num_pages);
	pagelistinfo	= (struct vchiq_pagelist_info *)
			  (scatterlist + num_pages);

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Populate the fields of the pagelistinfo structure */
	pagelistinfo->pagelist = pagelist;
	pagelistinfo->pagelist_buffer_size = pagelist_size;
	pagelistinfo->dma_addr = dma_addr;
	pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ?
				 DMA_TO_DEVICE : DMA_FROM_DEVICE;
	pagelistinfo->num_pages = num_pages;
	pagelistinfo->pages_need_release = 0;
	pagelistinfo->pages = pages;
	pagelistinfo->scatterlist = scatterlist;
	pagelistinfo->scatterlist_mapped = 0;

	if (buf) {
		unsigned long length = count;
		unsigned int off = offset;

		for (actual_pages = 0; actual_pages < num_pages;
		     actual_pages++) {
			struct page *pg =
				vmalloc_to_page((buf +
						 (actual_pages * PAGE_SIZE)));
			size_t bytes = PAGE_SIZE - off;

			if (!pg) {
				cleanup_pagelistinfo(instance, pagelistinfo);
				return NULL;
			}

			if (bytes > length)
				bytes = length;
			pages[actual_pages] = pg;
			length -= bytes;
			off = 0;
		}
		/* do not try and release vmalloc pages */
	} else {
		actual_pages = pin_user_pages_fast((unsigned long)ubuf & PAGE_MASK, num_pages,
						   type == PAGELIST_READ, pages);

		if (actual_pages != num_pages) {
			vchiq_log_info(vchiq_arm_log_level,
				       "%s - only %d/%d pages locked",
				       __func__, actual_pages, num_pages);

			/* This is probably due to the process being killed */
			if (actual_pages > 0)
				unpin_user_pages(pages, actual_pages);
			cleanup_pagelistinfo(instance, pagelistinfo);
			return NULL;
		}
		/* release user pages */
		pagelistinfo->pages_need_release = 1;
	}

	/*
	 * Initialize the scatterlist so that the magic cookie
	 * is filled if debugging is enabled
	 */
	sg_init_table(scatterlist, num_pages);
	/* Now set the pages for each scatterlist */
	for (i = 0; i < num_pages; i++) {
		unsigned int len = PAGE_SIZE - offset;

		if (len > count)
			len = count;
		sg_set_page(scatterlist + i, pages[i], len, offset);
		offset = 0;
		count -= len;
	}

	dma_buffers = dma_map_sg(instance->state->dev,
				 scatterlist,
				 num_pages,
				 pagelistinfo->dma_dir);

	if (dma_buffers == 0) {
		cleanup_pagelistinfo(instance, pagelistinfo);
		return NULL;
	}

	pagelistinfo->scatterlist_mapped = 1;

	/* Combine adjacent blocks for performance */
	k = 0;
	for_each_sg(scatterlist, sg, dma_buffers, i) {
		u32 len = sg_dma_len(sg);
		u32 addr = sg_dma_address(sg);

		/* Note: addrs is the address + page_count - 1
		 * The firmware expects blocks after the first to be page-
		 * aligned and a multiple of the page size
		 */
		WARN_ON(len == 0);
		WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
		WARN_ON(i && (addr & ~PAGE_MASK));
		if (is_adjacent_block(addrs, addr, k))
			addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
		else
			addrs[k++] = (addr & PAGE_MASK) |
				     (((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
	}

	/* Partial cache lines (fragments) require special measures */
	if ((type == PAGELIST_READ) &&
	    ((pagelist->offset & (g_cache_line_size - 1)) ||
	     ((pagelist->offset + pagelist->length) &
	      (g_cache_line_size - 1)))) {
		char *fragments;

		if (down_interruptible(&g_free_fragments_sema)) {
			cleanup_pagelistinfo(instance, pagelistinfo);
			return NULL;
		}

		WARN_ON(!g_free_fragments);

		down(&g_free_fragments_mutex);
		fragments = g_free_fragments;
		WARN_ON(!fragments);
		g_free_fragments = *(char **)g_free_fragments;
		up(&g_free_fragments_mutex);
		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
			(fragments - g_fragments_base) / g_fragments_size;
	}

	return pagelistinfo;
}

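/*
 * Undo create_pagelist() once a transfer has completed (or failed): unmap the
 * scatterlist, copy any head/tail fragment bytes the VPU wrote into the
 * fragment buffer back into the first/last pages, return the fragment buffer
 * to the free list, mark the pages dirty for reads, and release everything.
 */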
static void
free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo,
	      int actual)
{
	struct pagelist *pagelist = pagelistinfo->pagelist;
	struct page **pages = pagelistinfo->pages;
	unsigned int num_pages = pagelistinfo->num_pages;

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK, %d",
			__func__, pagelistinfo->pagelist, actual);

	/*
	 * NOTE: dma_unmap_sg must be called before the
	 * cpu can touch any of the data/pages.
	 */
	dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
		     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	pagelistinfo->scatterlist_mapped = 0;

	/* Deal with any partial cache lines (fragments) */
	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS && g_fragments_base) {
		char *fragments = g_fragments_base +
			(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
			g_fragments_size;
		int head_bytes, tail_bytes;

		head_bytes = (g_cache_line_size - pagelist->offset) &
			     (g_cache_line_size - 1);
		tail_bytes = (pagelist->offset + actual) &
			     (g_cache_line_size - 1);

		if ((actual >= 0) && (head_bytes != 0)) {
			if (head_bytes > actual)
				head_bytes = actual;

			memcpy_to_page(pages[0],
				       pagelist->offset,
				       fragments,
				       head_bytes);
		}
		if ((actual >= 0) && (head_bytes < actual) &&
		    (tail_bytes != 0))
			memcpy_to_page(pages[num_pages - 1],
				       (pagelist->offset + actual) &
				       (PAGE_SIZE - 1) & ~(g_cache_line_size - 1),
				       fragments + g_cache_line_size,
				       tail_bytes);

		down(&g_free_fragments_mutex);
		*(char **)fragments = g_free_fragments;
		g_free_fragments = fragments;
		up(&g_free_fragments_mutex);
		up(&g_free_fragments_sema);
	}

	/* Need to mark all the pages dirty. */
	if (pagelist->type != PAGELIST_WRITE &&
	    pagelistinfo->pages_need_release) {
		unsigned int i;

		for (i = 0; i < num_pages; i++)
			set_page_dirty(pages[i]);
	}

	cleanup_pagelistinfo(instance, pagelistinfo);
}

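/*
 * One-time platform setup: allocate the shared slot memory and the fragment
 * pool from coherent DMA memory, initialise the core state, map the doorbell
 * registers and IRQ, and hand the bus address of the slots to the VideoCore
 * firmware via the RPI_FIRMWARE_VCHIQ_INIT mailbox property.
 */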
static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
{
	struct device *dev = &pdev->dev;
	struct vchiq_drvdata *drvdata = platform_get_drvdata(pdev);
	struct rpi_firmware *fw = drvdata->fw;
	struct vchiq_slot_zero *vchiq_slot_zero;
	void *slot_mem;
	dma_addr_t slot_phys;
	u32 channelbase;
	int slot_mem_size, frag_mem_size;
	int err, irq, i;

	/*
	 * VCHI messages between the CPU and firmware use
	 * 32-bit bus addresses.
	 */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (err < 0)
		return err;

	g_cache_line_size = drvdata->cache_line_size;
	g_fragments_size = 2 * g_cache_line_size;

	/* Allocate space for the channels in coherent memory */
	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);

	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
				       &slot_phys, GFP_KERNEL);
	if (!slot_mem) {
		dev_err(dev, "could not allocate DMA memory\n");
		return -ENOMEM;
	}

	WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
	if (!vchiq_slot_zero)
		return -ENOMEM;

	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)slot_phys + slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	g_fragments_base = (char *)slot_mem + slot_mem_size;

	g_free_fragments = g_fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(char **)&g_fragments_base[i * g_fragments_size] =
			&g_fragments_base[(i + 1) * g_fragments_size];
	}
	*(char **)&g_fragments_base[i * g_fragments_size] = NULL;
	sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

	err = vchiq_init_state(state, vchiq_slot_zero, dev);
	if (err)
		return err;

	g_regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(g_regs))
		return PTR_ERR(g_regs);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
			       "VCHIQ doorbell", state);
	if (err) {
		dev_err(dev, "failed to register irq=%d\n", irq);
		return err;
	}

	/* Send the base address of the slots to VideoCore */
	channelbase = slot_phys;
	err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
				    &channelbase, sizeof(channelbase));
	if (err) {
		dev_err(dev, "failed to send firmware property: %d\n", err);
		return err;
	}

	if (channelbase) {
		dev_err(dev, "failed to set channelbase (response: %x)\n",
			channelbase);
		return -ENXIO;
	}

	vchiq_log_info(vchiq_arm_log_level, "vchiq_init - done (slots %pK, phys %pad)",
		       vchiq_slot_zero, &slot_phys);

	vchiq_call_connected_callbacks();

	return 0;
}

static void
vchiq_arm_init_state(struct vchiq_state *state,
		     struct vchiq_arm_state *arm_state)
{
	if (arm_state) {
		rwlock_init(&arm_state->susp_res_lock);

		init_completion(&arm_state->ka_evt);
		atomic_set(&arm_state->ka_use_count, 0);
		atomic_set(&arm_state->ka_use_ack_count, 0);
		atomic_set(&arm_state->ka_release_count, 0);

		arm_state->state = state;
		arm_state->first_connect = 0;
	}
}

int
vchiq_platform_init_state(struct vchiq_state *state)
{
	struct vchiq_2835_state *platform_state;

	state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
	if (!state->platform_state)
		return -ENOMEM;

	platform_state = (struct vchiq_2835_state *)state->platform_state;

	platform_state->inited = 1;
	vchiq_arm_init_state(state, &platform_state->arm_state);

	return 0;
}

static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
{
	struct vchiq_2835_state *platform_state;

	platform_state = (struct vchiq_2835_state *)state->platform_state;

	WARN_ON_ONCE(!platform_state->inited);

	return &platform_state->arm_state;
}

void
remote_event_signal(struct remote_event *event)
{
	/*
	 * Ensure that all writes to shared data structures have completed
	 * before signalling the peer.
	 */
	wmb();

	event->fired = 1;

	dsb(sy);	/* data barrier operation */

	if (event->armed)
		writel(0, g_regs + BELL2); /* trigger vc interrupt */
}

int
vchiq_prepare_bulk_data(struct vchiq_instance *instance, struct vchiq_bulk *bulk, void *offset,
			void __user *uoffset, int size, int dir)
{
	struct vchiq_pagelist_info *pagelistinfo;

	pagelistinfo = create_pagelist(instance, offset, uoffset, size,
				       (dir == VCHIQ_BULK_RECEIVE)
				       ? PAGELIST_READ
				       : PAGELIST_WRITE);

	if (!pagelistinfo)
		return -ENOMEM;

	bulk->data = pagelistinfo->dma_addr;

	/*
	 * Store the pagelistinfo address in remote_data,
	 * which isn't used by the slave.
	 */
	bulk->remote_data = pagelistinfo;

	return 0;
}

void
vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk)
{
	if (bulk && bulk->remote_data && bulk->actual)
		free_pagelist(instance, (struct vchiq_pagelist_info *)bulk->remote_data,
			      bulk->actual);
}

int vchiq_dump_platform_state(void *dump_context)
{
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf), " Platform: 2835 (VC master)");
	return vchiq_dump(dump_context, buf, len + 1);
}

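/*
 * The functions below (vchiq_initialise/connect/shutdown and the service and
 * bulk-transfer helpers) form the in-kernel client interface; they are
 * exported for use by other drivers, such as the bcm2835-camera and
 * bcm2835-audio children registered in vchiq_probe().
 */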
#define VCHIQ_INIT_RETRIES 10
int vchiq_initialise(struct vchiq_instance **instance_out)
{
	struct vchiq_state *state;
	struct vchiq_instance *instance = NULL;
	int i, ret;

	/*
	 * VideoCore may not be ready due to boot up timing.
	 * It may never be ready if kernel and firmware are mismatched, so
	 * don't block forever.
	 */
	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
		state = vchiq_get_state();
		if (state)
			break;
		usleep_range(500, 600);
	}
	if (i == VCHIQ_INIT_RETRIES) {
		vchiq_log_error(vchiq_core_log_level, "%s: videocore not initialized\n", __func__);
		ret = -ENOTCONN;
		goto failed;
	} else if (i > 0) {
		vchiq_log_warning(vchiq_core_log_level,
				  "%s: videocore initialized after %d retries\n", __func__, i);
	}

	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance) {
		vchiq_log_error(vchiq_core_log_level,
				"%s: error allocating vchiq instance\n", __func__);
		ret = -ENOMEM;
		goto failed;
	}

	instance->connected = 0;
	instance->state = state;
	mutex_init(&instance->bulk_waiter_list_mutex);
	INIT_LIST_HEAD(&instance->bulk_waiter_list);

	*instance_out = instance;

	ret = 0;

failed:
	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, ret);

	return ret;
}
EXPORT_SYMBOL(vchiq_initialise);

void free_bulk_waiter(struct vchiq_instance *instance)
{
	struct bulk_waiter_node *waiter, *next;

	list_for_each_entry_safe(waiter, next,
				 &instance->bulk_waiter_list, list) {
		list_del(&waiter->list);
		vchiq_log_info(vchiq_arm_log_level, "bulk_waiter - cleaned up %pK for pid %d",
			       waiter, waiter->pid);
		kfree(waiter);
	}
}

int vchiq_shutdown(struct vchiq_instance *instance)
{
	int status = 0;
	struct vchiq_state *state = instance->state;

	if (mutex_lock_killable(&state->mutex))
		return -EAGAIN;

	/* Remove all services */
	vchiq_shutdown_internal(state, instance);

	mutex_unlock(&state->mutex);

	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);

	free_bulk_waiter(instance);
	kfree(instance);

	return status;
}
EXPORT_SYMBOL(vchiq_shutdown);

static int vchiq_is_connected(struct vchiq_instance *instance)
{
	return instance->connected;
}

int vchiq_connect(struct vchiq_instance *instance)
{
	int status;
	struct vchiq_state *state = instance->state;

	if (mutex_lock_killable(&state->mutex)) {
		vchiq_log_trace(vchiq_core_log_level, "%s: call to mutex_lock failed", __func__);
		status = -EAGAIN;
		goto failed;
	}
	status = vchiq_connect_internal(state, instance);

	if (!status)
		instance->connected = 1;

	mutex_unlock(&state->mutex);

failed:
	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_connect);

static int
vchiq_add_service(struct vchiq_instance *instance,
		  const struct vchiq_service_params_kernel *params,
		  unsigned int *phandle)
{
	int status;
	struct vchiq_state *state = instance->state;
	struct vchiq_service *service = NULL;
	int srvstate;

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	srvstate = vchiq_is_connected(instance)
		   ? VCHIQ_SRVSTATE_LISTENING
		   : VCHIQ_SRVSTATE_HIDDEN;

	service = vchiq_add_service_internal(state, params, srvstate, instance, NULL);

	if (service) {
		*phandle = service->handle;
		status = 0;
	} else {
		status = -EINVAL;
	}

	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);

	return status;
}

int
vchiq_open_service(struct vchiq_instance *instance,
		   const struct vchiq_service_params_kernel *params,
		   unsigned int *phandle)
{
	int status = -EINVAL;
	struct vchiq_state *state = instance->state;
	struct vchiq_service *service = NULL;

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	if (!vchiq_is_connected(instance))
		goto failed;

	service = vchiq_add_service_internal(state, params, VCHIQ_SRVSTATE_OPENING, instance, NULL);

	if (service) {
		*phandle = service->handle;
		status = vchiq_open_service_internal(service, current->pid);
		if (status) {
			vchiq_remove_service(instance, service->handle);
			*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
		}
	}

failed:
	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_open_service);

int
vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const void *data,
		    unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
{
	int status;

	while (1) {
		switch (mode) {
		case VCHIQ_BULK_MODE_NOCALLBACK:
		case VCHIQ_BULK_MODE_CALLBACK:
			status = vchiq_bulk_transfer(instance, handle,
						     (void *)data, NULL,
						     size, userdata, mode,
						     VCHIQ_BULK_TRANSMIT);
			break;
		case VCHIQ_BULK_MODE_BLOCKING:
			status = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
							      VCHIQ_BULK_TRANSMIT);
			break;
		default:
			return -EINVAL;
		}

		/*
		 * vchiq_*_bulk_transfer() may return -EAGAIN, so we need
		 * to implement a retry mechanism since this function is
		 * supposed to block until queued
		 */
		if (status != -EAGAIN)
			break;

		msleep(1);
	}

	return status;
}
EXPORT_SYMBOL(vchiq_bulk_transmit);

int vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int handle,
		       void *data, unsigned int size, void *userdata,
		       enum vchiq_bulk_mode mode)
{
	int status;

	while (1) {
		switch (mode) {
		case VCHIQ_BULK_MODE_NOCALLBACK:
		case VCHIQ_BULK_MODE_CALLBACK:
			status = vchiq_bulk_transfer(instance, handle, data, NULL,
						     size, userdata,
						     mode, VCHIQ_BULK_RECEIVE);
			break;
		case VCHIQ_BULK_MODE_BLOCKING:
			status = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
							      VCHIQ_BULK_RECEIVE);
			break;
		default:
			return -EINVAL;
		}

		/*
		 * vchiq_*_bulk_transfer() may return -EAGAIN, so we need
		 * to implement a retry mechanism since this function is
		 * supposed to block until queued
		 */
		if (status != -EAGAIN)
			break;

		msleep(1);
	}

	return status;
}
EXPORT_SYMBOL(vchiq_bulk_receive);

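/*
 * Blocking bulk transfers are tracked with a per-pid bulk_waiter_node on the
 * instance's bulk_waiter_list. If this thread still has a waiter saved from
 * an earlier transfer that returned -EAGAIN, it is reused (and any unrelated
 * outstanding bulk has its completion signal cancelled); otherwise a new
 * waiter is allocated. The waiter is saved again if this transfer is also
 * left pending.
 */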
static int
vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data,
			     unsigned int size, enum vchiq_bulk_dir dir)
{
	struct vchiq_service *service;
	int status;
	struct bulk_waiter_node *waiter = NULL, *iter;

	service = find_service_by_handle(instance, handle);
	if (!service)
		return -EINVAL;

	vchiq_service_put(service);

	mutex_lock(&instance->bulk_waiter_list_mutex);
	list_for_each_entry(iter, &instance->bulk_waiter_list, list) {
		if (iter->pid == current->pid) {
			list_del(&iter->list);
			waiter = iter;
			break;
		}
	}
	mutex_unlock(&instance->bulk_waiter_list_mutex);

	if (waiter) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* This thread has an outstanding bulk transfer. */
			/* FIXME: why compare a dma address to a pointer? */
			if ((bulk->data != (dma_addr_t)(uintptr_t)data) || (bulk->size != size)) {
				/*
				 * This is not a retry of the previous one.
				 * Cancel the signal when the transfer completes.
				 */
				spin_lock(&bulk_waiter_spinlock);
				bulk->userdata = NULL;
				spin_unlock(&bulk_waiter_spinlock);
			}
		}
	} else {
		waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
		if (!waiter) {
			vchiq_log_error(vchiq_core_log_level, "%s - out of memory", __func__);
			return -ENOMEM;
		}
	}

	status = vchiq_bulk_transfer(instance, handle, data, NULL, size,
				     &waiter->bulk_waiter,
				     VCHIQ_BULK_MODE_BLOCKING, dir);
	if ((status != -EAGAIN) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* Cancel the signal when the transfer completes. */
			spin_lock(&bulk_waiter_spinlock);
			bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		kfree(waiter);
	} else {
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		vchiq_log_info(vchiq_arm_log_level, "saved bulk_waiter %pK for pid %d", waiter,
			       current->pid);
	}

	return status;
}

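/*
 * Queue a completion record for delivery to the user-space client via the
 * AWAIT_COMPLETION ioctl. If the completion ring is full, wait
 * (interruptibly) for the client to remove entries before inserting.
 */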
static int
add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
	       struct vchiq_header *header, struct user_service *user_service,
	       void *bulk_userdata)
{
	struct vchiq_completion_data_kernel *completion;
	int insert;

	DEBUG_INITIALISE(g_state.local);

	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level, "%s - completion queue full", __func__);
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (wait_for_completion_interruptible(&instance->remove_event)) {
			vchiq_log_info(vchiq_arm_log_level, "service_callback interrupted");
			return -EAGAIN;
		} else if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level, "service_callback closing");
			return 0;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/*
		 * Take an extra reference, to be held until
		 * this CLOSED notification is delivered.
		 */
		vchiq_service_get(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/*
	 * A write barrier is needed here to ensure that the entire completion
	 * record is written out before the insert point.
	 */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	complete(&instance->insert_event);

	return 0;
}

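/*
 * Callback invoked by the vchiq core for services owned by user-space
 * clients. Messages for "vchi" services are placed on the per-service
 * message queue (possibly bypassing the completion queue); everything else
 * is reported through add_completion().
 */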
int
service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
		 struct vchiq_header *header, unsigned int handle, void *bulk_userdata)
{
	/*
	 * How do we ensure the callback goes to the right client?
	 * The service_user data points to a user_service record
	 * containing the original callback and the user state structure, which
	 * contains a circular buffer for completion records.
	 */
	struct user_service *user_service;
	struct vchiq_service *service;
	bool skip_completion = false;

	DEBUG_INITIALISE(g_state.local);

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	rcu_read_lock();
	service = handle_to_service(instance, handle);
	if (WARN_ON(!service)) {
		rcu_read_unlock();
		return 0;
	}

	user_service = (struct user_service *)service->base.userdata;

	if (!instance || instance->closing) {
		rcu_read_unlock();
		return 0;
	}

	/*
	 * Since we hop between different synchronization mechanisms below,
	 * taking an extra service reference keeps the implementation simpler.
	 */
	vchiq_service_get(service);
	rcu_read_unlock();

	vchiq_log_trace(vchiq_arm_log_level,
			"%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
			__func__, (unsigned long)user_service, service->localport,
			user_service->userdata, reason, (unsigned long)header,
			(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level, "%s - msg queue full", __func__);
			/*
			 * If there is no MESSAGE_AVAILABLE in the completion
			 * queue, add one
			 */
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				int status;

				vchiq_log_info(vchiq_arm_log_level,
					       "Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason, NULL, user_service,
							bulk_userdata);
				if (status) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					vchiq_service_put(service);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			if (wait_for_completion_interruptible(&user_service->remove_event)) {
				vchiq_log_info(vchiq_arm_log_level, "%s interrupted", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				vchiq_service_put(service);
				return -EAGAIN;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level, "%s closing", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				vchiq_service_put(service);
				return -EINVAL;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/*
		 * If there is a thread waiting in DEQUEUE_MESSAGE, or if
		 * there is a MESSAGE_AVAILABLE in the completion queue then
		 * bypass the completion queue.
		 */
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = true;
		}

		spin_unlock(&msg_queue_spinlock);
		complete(&user_service->insert_event);

		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	vchiq_service_put(service);

	if (skip_completion)
		return 0;

	return add_completion(instance, reason, header, user_service,
			      bulk_userdata);
}

int vchiq_dump(void *dump_context, const char *str, int len)
{
	struct dump_context *context = (struct dump_context *)dump_context;
	int copy_bytes;

	if (context->actual >= context->space)
		return 0;

	if (context->offset > 0) {
		int skip_bytes = min_t(int, len, context->offset);

		str += skip_bytes;
		len -= skip_bytes;
		context->offset -= skip_bytes;
		if (context->offset > 0)
			return 0;
	}
	copy_bytes = min_t(int, len, context->space - context->actual);
	if (copy_bytes == 0)
		return 0;
	if (copy_to_user(context->buf + context->actual, str,
			 copy_bytes))
		return -EFAULT;
	context->actual += copy_bytes;
	len -= copy_bytes;

	/*
	 * If the terminating NUL is included in the length, then it
	 * marks the end of a line and should be replaced with a
	 * carriage return.
	 */
	if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
		char cr = '\n';

		if (copy_to_user(context->buf + context->actual - 1,
				 &cr, 1))
			return -EFAULT;
	}
	return 0;
}

int vchiq_dump_platform_instances(void *dump_context)
{
	struct vchiq_state *state = vchiq_get_state();
	char buf[80];
	int len;
	int i;

	if (!state)
		return -ENOTCONN;

	/*
	 * There is no list of instances, so instead scan all services,
	 * marking those that have been dumped.
	 */

	rcu_read_lock();
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;

		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback)
			continue;

		instance = service->instance;
		if (instance)
			instance->mark = 0;
	}
	rcu_read_unlock();

	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;
		int err;

		rcu_read_lock();
		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback) {
			rcu_read_unlock();
			continue;
		}

		instance = service->instance;
		if (!instance || instance->mark) {
			rcu_read_unlock();
			continue;
		}
		rcu_read_unlock();

		len = snprintf(buf, sizeof(buf),
			       "Instance %pK: pid %d,%s completions %d/%d",
			       instance, instance->pid,
			       instance->connected ? " connected, " : "",
			       instance->completion_insert -
			       instance->completion_remove,
			       MAX_COMPLETIONS);
		err = vchiq_dump(dump_context, buf, len + 1);
		if (err)
			return err;
		instance->mark = 1;
	}
	return 0;
}

int vchiq_dump_platform_service_state(void *dump_context,
				      struct vchiq_service *service)
{
	struct user_service *user_service =
			(struct user_service *)service->base.userdata;
	char buf[80];
	int len;

	len = scnprintf(buf, sizeof(buf), " instance %pK", service->instance);

	if ((service->base.callback == service_callback) && user_service->is_vchi) {
		len += scnprintf(buf + len, sizeof(buf) - len, ", %d/%d messages",
				 user_service->msg_insert - user_service->msg_remove,
				 MSG_QUEUE_SIZE);

		if (user_service->dequeue_pending)
			len += scnprintf(buf + len, sizeof(buf) - len,
					 " (dequeue pending)");
	}

	return vchiq_dump(dump_context, buf, len + 1);
}

struct vchiq_state *
vchiq_get_state(void)
{
	if (!g_state.remote) {
		pr_err("%s: g_state.remote == NULL\n", __func__);
		return NULL;
	}

	if (g_state.remote->initialised != 1) {
		pr_notice("%s: g_state.remote->initialised != 1 (%d)\n",
			  __func__, g_state.remote->initialised);
		return NULL;
	}

	return &g_state;
}

/*
 * Autosuspend related functionality
 */

static int
vchiq_keepalive_vchiq_callback(struct vchiq_instance *instance,
			       enum vchiq_reason reason,
			       struct vchiq_header *header,
			       unsigned int service_user, void *bulk_user)
{
	vchiq_log_error(vchiq_susp_log_level, "%s callback reason %d", __func__, reason);
	return 0;
}

static int
vchiq_keepalive_thread_func(void *v)
{
	struct vchiq_state *state = (struct vchiq_state *)v;
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	int status;
	struct vchiq_instance *instance;
	unsigned int ka_handle;
	int ret;

	struct vchiq_service_params_kernel params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
		.callback    = vchiq_keepalive_vchiq_callback,
		.version     = KEEPALIVE_VER,
		.version_min = KEEPALIVE_VER_MIN
	};

	ret = vchiq_initialise(&instance);
	if (ret) {
		vchiq_log_error(vchiq_susp_log_level, "%s vchiq_initialise failed %d", __func__,
				ret);
		goto exit;
	}

	status = vchiq_connect(instance);
	if (status) {
		vchiq_log_error(vchiq_susp_log_level, "%s vchiq_connect failed %d", __func__,
				status);
		goto shutdown;
	}

	status = vchiq_add_service(instance, &params, &ka_handle);
	if (status) {
		vchiq_log_error(vchiq_susp_log_level, "%s vchiq_open_service failed %d", __func__,
				status);
		goto shutdown;
	}

	while (1) {
		long rc = 0, uc = 0;

		if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
			vchiq_log_error(vchiq_susp_log_level, "%s interrupted", __func__);
			flush_signals(current);
			continue;
		}

		/*
		 * read and clear counters. Do release_count then use_count to
		 * prevent getting more releases than uses
		 */
		rc = atomic_xchg(&arm_state->ka_release_count, 0);
		uc = atomic_xchg(&arm_state->ka_use_count, 0);

		/*
		 * Call use/release service the requisite number of times.
		 * Process use before release so use counts don't go negative
		 */
		while (uc--) {
			atomic_inc(&arm_state->ka_use_ack_count);
			status = vchiq_use_service(instance, ka_handle);
			if (status) {
				vchiq_log_error(vchiq_susp_log_level,
						"%s vchiq_use_service error %d", __func__, status);
			}
		}
		while (rc--) {
			status = vchiq_release_service(instance, ka_handle);
			if (status) {
				vchiq_log_error(vchiq_susp_log_level,
						"%s vchiq_release_service error %d", __func__,
						status);
			}
		}
	}

shutdown:
	vchiq_shutdown(instance);
exit:
	return 0;
}

int
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
		   enum USE_TYPE_E use_type)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	int ret = 0;
	char entity[16];
	int *entity_uc;
	int local_uc;

	if (!arm_state) {
		ret = -EINVAL;
		goto out;
	}

	if (use_type == USE_TYPE_VCHIQ) {
		sprintf(entity, "VCHIQ: ");
		entity_uc = &arm_state->peer_use_count;
	} else if (service) {
		sprintf(entity, "%c%c%c%c:%03d",
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id);
		entity_uc = &service->service_use_count;
	} else {
		vchiq_log_error(vchiq_susp_log_level, "%s null service ptr", __func__);
		ret = -EINVAL;
		goto out;
	}

	write_lock_bh(&arm_state->susp_res_lock);
	local_uc = ++arm_state->videocore_use_count;
	++(*entity_uc);

	vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
			*entity_uc, local_uc);

	write_unlock_bh(&arm_state->susp_res_lock);

	if (!ret) {
		int status = 0;
		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);

		while (ack_cnt && !status) {
			/* Send the use notify to videocore */
			status = vchiq_send_remote_use_active(state);
			if (!status)
				ack_cnt--;
			else
				atomic_add(ack_cnt, &arm_state->ka_use_ack_count);
		}
	}

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}

int
vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	int ret = 0;
	char entity[16];
	int *entity_uc;

	if (!arm_state) {
		ret = -EINVAL;
		goto out;
	}

	if (service) {
		sprintf(entity, "%c%c%c%c:%03d",
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id);
		entity_uc = &service->service_use_count;
	} else {
		sprintf(entity, "PEER: ");
		entity_uc = &arm_state->peer_use_count;
	}

	write_lock_bh(&arm_state->susp_res_lock);
	if (!arm_state->videocore_use_count || !(*entity_uc)) {
		/* Don't use BUG_ON - don't allow user thread to crash kernel */
		WARN_ON(!arm_state->videocore_use_count);
		WARN_ON(!(*entity_uc));
		ret = -EINVAL;
		goto unlock;
	}
	--arm_state->videocore_use_count;
	--(*entity_uc);

	vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
			*entity_uc, arm_state->videocore_use_count);

unlock:
	write_unlock_bh(&arm_state->susp_res_lock);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}

void
vchiq_on_remote_use(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	atomic_inc(&arm_state->ka_use_count);
	complete(&arm_state->ka_evt);
}

void
vchiq_on_remote_release(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	atomic_inc(&arm_state->ka_release_count);
	complete(&arm_state->ka_evt);
}

int
vchiq_use_service_internal(struct vchiq_service *service)
{
	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}

int
vchiq_release_service_internal(struct vchiq_service *service)
{
	return vchiq_release_internal(service->state, service);
}

struct vchiq_debugfs_node *
vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
{
	return &instance->debugfs_node;
}

int
vchiq_instance_get_use_count(struct vchiq_instance *instance)
{
	struct vchiq_service *service;
	int use_count = 0, i;

	i = 0;
	rcu_read_lock();
	while ((service = __next_service_by_instance(instance->state,
						     instance, &i)))
		use_count += service->service_use_count;
	rcu_read_unlock();
	return use_count;
}

int
vchiq_instance_get_pid(struct vchiq_instance *instance)
{
	return instance->pid;
}

int
vchiq_instance_get_trace(struct vchiq_instance *instance)
{
	return instance->trace;
}

void
vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
{
	struct vchiq_service *service;
	int i;

	i = 0;
	rcu_read_lock();
	while ((service = __next_service_by_instance(instance->state,
						     instance, &i)))
		service->trace = trace;
	rcu_read_unlock();
	instance->trace = (trace != 0);
}

int
vchiq_use_service(struct vchiq_instance *instance, unsigned int handle)
{
	int ret = -EINVAL;
	struct vchiq_service *service = find_service_by_handle(instance, handle);

	if (service) {
		ret = vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
		vchiq_service_put(service);
	}
	return ret;
}
EXPORT_SYMBOL(vchiq_use_service);

int
vchiq_release_service(struct vchiq_instance *instance, unsigned int handle)
{
	int ret = -EINVAL;
	struct vchiq_service *service = find_service_by_handle(instance, handle);

	if (service) {
		ret = vchiq_release_internal(service->state, service);
		vchiq_service_put(service);
	}
	return ret;
}
EXPORT_SYMBOL(vchiq_release_service);

struct service_data_struct {
	int fourcc;
	int clientid;
	int use_count;
};

void
vchiq_dump_service_use_state(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct service_data_struct *service_data;
	int i, found = 0;
	/*
	 * If there's more than 64 services, only dump ones with
	 * non-zero counts
	 */
	int only_nonzero = 0;
	static const char *nz = "<-- preventing suspend";

	int peer_count;
	int vc_use_count;
	int active_services;

	if (!arm_state)
		return;

	service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
				     GFP_KERNEL);
	if (!service_data)
		return;

	read_lock_bh(&arm_state->susp_res_lock);
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > MAX_SERVICES)
		only_nonzero = 1;

	rcu_read_lock();
	for (i = 0; i < active_services; i++) {
		struct vchiq_service *service_ptr =
			rcu_dereference(state->services[i]);

		if (!service_ptr)
			continue;

		if (only_nonzero && !service_ptr->service_use_count)
			continue;

		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
			continue;

		service_data[found].fourcc = service_ptr->base.fourcc;
		service_data[found].clientid = service_ptr->client_id;
		service_data[found].use_count = service_ptr->service_use_count;
		found++;
		if (found >= MAX_SERVICES)
			break;
	}
	rcu_read_unlock();

	read_unlock_bh(&arm_state->susp_res_lock);

	if (only_nonzero)
		vchiq_log_warning(vchiq_susp_log_level, "Too many active services (%d). Only dumping up to first %d services with non-zero use-count",
				  active_services, found);

	for (i = 0; i < found; i++) {
		vchiq_log_warning(vchiq_susp_log_level, "----- %c%c%c%c:%d service count %d %s",
				  VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
				  service_data[i].clientid, service_data[i].use_count,
				  service_data[i].use_count ? nz : "");
	}
	vchiq_log_warning(vchiq_susp_log_level, "----- VCHIQ use count %d", peer_count);
	vchiq_log_warning(vchiq_susp_log_level, "--- Overall vchiq instance use count %d",
			  vc_use_count);

	kfree(service_data);
}

int
vchiq_check_service(struct vchiq_service *service)
{
	struct vchiq_arm_state *arm_state;
	int ret = -EINVAL;

	if (!service || !service->state)
		goto out;

	arm_state = vchiq_platform_get_arm_state(service->state);

	read_lock_bh(&arm_state->susp_res_lock);
	if (service->service_use_count)
		ret = 0;
	read_unlock_bh(&arm_state->susp_res_lock);

	if (ret) {
		vchiq_log_error(vchiq_susp_log_level,
				"%s ERROR - %c%c%c%c:%d service count %d, state count %d", __func__,
				VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc), service->client_id,
				service->service_use_count, arm_state->videocore_use_count);
		vchiq_dump_service_use_state(service->state);
	}
out:
	return ret;
}

void vchiq_platform_conn_state_changed(struct vchiq_state *state,
				       enum vchiq_connstate oldstate,
				       enum vchiq_connstate newstate)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	char threadname[16];

	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
		       get_conn_state_name(oldstate), get_conn_state_name(newstate));
	if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
		return;

	write_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->first_connect) {
		write_unlock_bh(&arm_state->susp_res_lock);
		return;
	}

	arm_state->first_connect = 1;
	write_unlock_bh(&arm_state->susp_res_lock);
	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
		 state->id);
	arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
					      (void *)state,
					      threadname);
	if (IS_ERR(arm_state->ka_thread)) {
		vchiq_log_error(vchiq_susp_log_level,
				"vchiq: FATAL: couldn't create thread %s",
				threadname);
	} else {
		wake_up_process(arm_state->ka_thread);
	}
}

static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_drvdata },
	{ .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_drvdata },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);

static struct platform_device *
vchiq_register_child(struct platform_device *pdev, const char *name)
{
	struct platform_device_info pdevinfo;
	struct platform_device *child;

	memset(&pdevinfo, 0, sizeof(pdevinfo));

	pdevinfo.parent = &pdev->dev;
	pdevinfo.name = name;
	pdevinfo.id = PLATFORM_DEVID_NONE;
	pdevinfo.dma_mask = DMA_BIT_MASK(32);

	child = platform_device_register_full(&pdevinfo);
	if (IS_ERR(child)) {
		dev_warn(&pdev->dev, "%s not registered\n", name);
		child = NULL;
	}

	return child;
}

static int vchiq_probe(struct platform_device *pdev)
{
	struct device_node *fw_node;
	const struct of_device_id *of_id;
	struct vchiq_drvdata *drvdata;
	int err;

	of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
	drvdata = (struct vchiq_drvdata *)of_id->data;
	if (!drvdata)
		return -EINVAL;

	fw_node = of_find_compatible_node(NULL, NULL,
					  "raspberrypi,bcm2835-firmware");
	if (!fw_node) {
		dev_err(&pdev->dev, "Missing firmware node\n");
		return -ENOENT;
	}

	drvdata->fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
	of_node_put(fw_node);
	if (!drvdata->fw)
		return -EPROBE_DEFER;

	platform_set_drvdata(pdev, drvdata);

	err = vchiq_platform_init(pdev, &g_state);
	if (err)
		goto failed_platform_init;

	vchiq_debugfs_init();

	vchiq_log_info(vchiq_arm_log_level,
		       "vchiq: platform initialised - version %d (min %d)",
		       VCHIQ_VERSION, VCHIQ_VERSION_MIN);

	/*
	 * Simply exit on error since the function handles cleanup in
	 * cases of failure.
	 */
	err = vchiq_register_chrdev(&pdev->dev);
	if (err) {
		vchiq_log_warning(vchiq_arm_log_level,
				  "Failed to initialize vchiq cdev");
		goto error_exit;
	}

	bcm2835_camera = vchiq_register_child(pdev, "bcm2835-camera");
	bcm2835_audio = vchiq_register_child(pdev, "bcm2835_audio");

	return 0;

failed_platform_init:
	vchiq_log_warning(vchiq_arm_log_level, "could not initialize vchiq platform");
error_exit:
	return err;
}

static void vchiq_remove(struct platform_device *pdev)
{
	platform_device_unregister(bcm2835_audio);
	platform_device_unregister(bcm2835_camera);
	vchiq_debugfs_deinit();
	vchiq_deregister_chrdev();
}

static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove_new = vchiq_remove,
};

static int __init vchiq_driver_init(void)
{
	int ret;

	ret = platform_driver_register(&vchiq_driver);
	if (ret)
		pr_err("Failed to register vchiq driver\n");

	return ret;
}
module_init(vchiq_driver_init);

static void __exit vchiq_driver_exit(void)
{
	platform_driver_unregister(&vchiq_driver);
}
module_exit(vchiq_driver_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");