1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
4  * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5  */
6 
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/sched/signal.h>
10 #include <linux/types.h>
11 #include <linux/errno.h>
12 #include <linux/cdev.h>
13 #include <linux/fs.h>
14 #include <linux/device.h>
15 #include <linux/mm.h>
16 #include <linux/highmem.h>
17 #include <linux/pagemap.h>
18 #include <linux/bug.h>
19 #include <linux/completion.h>
20 #include <linux/list.h>
21 #include <linux/of.h>
22 #include <linux/platform_device.h>
23 #include <linux/compat.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/rcupdate.h>
26 #include <linux/delay.h>
27 #include <linux/slab.h>
28 #include <linux/interrupt.h>
29 #include <linux/io.h>
30 #include <linux/uaccess.h>
31 #include <soc/bcm2835/raspberrypi-firmware.h>
32 
33 #include "vchiq_core.h"
34 #include "vchiq_ioctl.h"
35 #include "vchiq_arm.h"
36 #include "vchiq_debugfs.h"
37 #include "vchiq_connected.h"
38 #include "vchiq_pagelist.h"
39 
40 #define DEVICE_NAME "vchiq"
41 
42 #define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
43 
44 #define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
45 
46 #define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
47 #define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX  1
48 
49 #define BELL0	0x00
50 #define BELL2	0x08
51 
52 #define ARM_DS_ACTIVE	BIT(2)
53 
54 /* Override the default prefix, which would be vchiq_arm (from the filename) */
55 #undef MODULE_PARAM_PREFIX
56 #define MODULE_PARAM_PREFIX DEVICE_NAME "."
57 
58 #define KEEPALIVE_VER 1
59 #define KEEPALIVE_VER_MIN KEEPALIVE_VER
60 
61 /* Run time control of log level, based on KERN_XXX level. */
62 int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
63 int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
64 
65 DEFINE_SPINLOCK(msg_queue_spinlock);
66 struct vchiq_state g_state;
67 
68 static struct platform_device *bcm2835_camera;
69 static struct platform_device *bcm2835_audio;
70 
71 struct vchiq_drvdata {
72 	const unsigned int cache_line_size;
73 	struct rpi_firmware *fw;
74 };
75 
76 static struct vchiq_drvdata bcm2835_drvdata = {
77 	.cache_line_size = 32,
78 };
79 
80 static struct vchiq_drvdata bcm2836_drvdata = {
81 	.cache_line_size = 64,
82 };
83 
84 struct vchiq_arm_state {
85 	/* Keepalive-related data */
86 	struct task_struct *ka_thread;
87 	struct completion ka_evt;
88 	atomic_t ka_use_count;
89 	atomic_t ka_use_ack_count;
90 	atomic_t ka_release_count;
91 
92 	rwlock_t susp_res_lock;
93 
94 	struct vchiq_state *state;
95 
96 	/*
97 	 * Global use count for videocore.
98 	 * This is equal to the sum of the use counts for all services.  When
99 	 * this hits zero the videocore suspend procedure will be initiated.
100 	 */
101 	int videocore_use_count;
102 
103 	/*
104 	 * Use count to track requests from videocore peer.
105 	 * This use count is not associated with a service, so needs to be
106 	 * tracked separately with the state.
107 	 */
108 	int peer_use_count;
109 
110 	/*
111 	 * Flag to indicate that the first vchiq connect has made it through.
112 	 * This means that both sides should be fully ready, and we should
113 	 * be able to suspend after this point.
114 	 */
115 	int first_connect;
116 };
117 
118 struct vchiq_2835_state {
119 	int inited;
120 	struct vchiq_arm_state arm_state;
121 };
122 
123 struct vchiq_pagelist_info {
124 	struct pagelist *pagelist;
125 	size_t pagelist_buffer_size;
126 	dma_addr_t dma_addr;
127 	enum dma_data_direction dma_dir;
128 	unsigned int num_pages;
129 	unsigned int pages_need_release;
130 	struct page **pages;
131 	struct scatterlist *scatterlist;
132 	unsigned int scatterlist_mapped;
133 };
134 
135 static void __iomem *g_regs;
136 /* This value is the size of the L2 cache lines as understood by the
137  * VPU firmware, which determines the required alignment of the
138  * offsets/sizes in pagelists.
139  *
140  * Modern VPU firmware looks for a DT "cache-line-size" property in
141  * the VCHIQ node and will overwrite it with the actual L2 cache size,
142  * which the kernel must then respect.  That property was rejected
143  * upstream, so we have to use the VPU firmware's compatibility value
144  * of 32.
145  */
146 static unsigned int g_cache_line_size = 32;
147 static unsigned int g_fragments_size;
148 static char *g_fragments_base;
149 static char *g_free_fragments;
150 static struct semaphore g_free_fragments_sema;
151 static struct device *g_dev;
152 
153 static DEFINE_SEMAPHORE(g_free_fragments_mutex);
154 
155 static enum vchiq_status
156 vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
157 			     unsigned int size, enum vchiq_bulk_dir dir);
158 
159 static irqreturn_t
160 vchiq_doorbell_irq(int irq, void *dev_id)
161 {
162 	struct vchiq_state *state = dev_id;
163 	irqreturn_t ret = IRQ_NONE;
164 	unsigned int status;
165 
166 	/* Read (and clear) the doorbell */
167 	status = readl(g_regs + BELL0);
168 
169 	if (status & ARM_DS_ACTIVE) {  /* Was the doorbell rung? */
170 		remote_event_pollall(state);
171 		ret = IRQ_HANDLED;
172 	}
173 
174 	return ret;
175 }
176 
/*
 * Undo everything create_pagelist() set up for a transfer: unmap the
 * scatterlist from the DMA device (if mapped), unpin any user pages,
 * and release the single coherent allocation that holds the pagelist,
 * page-pointer array, scatterlist and the pagelistinfo itself.
 */
static void
cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
{
	if (pagelistinfo->scatterlist_mapped) {
		dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
			     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	}

	/* Only set for user buffers pinned via pin_user_pages_fast() */
	if (pagelistinfo->pages_need_release)
		unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);

	/* This also frees pagelistinfo - it lives inside the same buffer */
	dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
			  pagelistinfo->pagelist, pagelistinfo->dma_addr);
}
191 
192 /* There is a potential problem with partial cache lines (pages?)
193  * at the ends of the block when reading. If the CPU accessed anything in
194  * the same line (page?) then it may have pulled old data into the cache,
195  * obscuring the new data underneath. We can solve this by transferring the
196  * partial cache lines separately, and allowing the ARM to copy into the
197  * cached area.
198  */
199 
/*
 * Build a pagelist in DMA-coherent memory describing a bulk-transfer
 * buffer so the VPU can DMA to/from it.  Exactly one of @buf (kernel
 * vmalloc memory) and @ubuf (user memory) is used.  The returned
 * vchiq_pagelist_info, the pagelist, the page-pointer array and the
 * scatterlist all live in one coherent allocation, released by
 * cleanup_pagelistinfo().  Returns NULL on any failure.
 */
static struct vchiq_pagelist_info *
create_pagelist(char *buf, char __user *ubuf,
		size_t count, unsigned short type)
{
	struct pagelist *pagelist;
	struct vchiq_pagelist_info *pagelistinfo;
	struct page **pages;
	u32 *addrs;
	unsigned int num_pages, offset, i, k;
	int actual_pages;
	size_t pagelist_size;
	struct scatterlist *scatterlist, *sg;
	int dma_buffers;
	dma_addr_t dma_addr;

	if (count >= INT_MAX - PAGE_SIZE)
		return NULL;

	/* Offset of the buffer within its first page */
	if (buf)
		offset = (uintptr_t)buf & (PAGE_SIZE - 1);
	else
		offset = (uintptr_t)ubuf & (PAGE_SIZE - 1);
	num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);

	/* Reject page counts that would overflow pagelist_size below */
	if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) -
			 sizeof(struct vchiq_pagelist_info)) /
			(sizeof(u32) + sizeof(pages[0]) +
			 sizeof(struct scatterlist)))
		return NULL;

	pagelist_size = sizeof(struct pagelist) +
			(num_pages * sizeof(u32)) +
			(num_pages * sizeof(pages[0]) +
			(num_pages * sizeof(struct scatterlist))) +
			sizeof(struct vchiq_pagelist_info);

	/* Allocate enough storage to hold the page pointers and the page
	 * list
	 */
	pagelist = dma_alloc_coherent(g_dev, pagelist_size, &dma_addr,
				      GFP_KERNEL);

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);

	if (!pagelist)
		return NULL;

	/* Carve the auxiliary structures out of the single allocation */
	addrs		= pagelist->addrs;
	pages		= (struct page **)(addrs + num_pages);
	scatterlist	= (struct scatterlist *)(pages + num_pages);
	pagelistinfo	= (struct vchiq_pagelist_info *)
			  (scatterlist + num_pages);

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Populate the fields of the pagelistinfo structure */
	pagelistinfo->pagelist = pagelist;
	pagelistinfo->pagelist_buffer_size = pagelist_size;
	pagelistinfo->dma_addr = dma_addr;
	pagelistinfo->dma_dir =  (type == PAGELIST_WRITE) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE;
	pagelistinfo->num_pages = num_pages;
	pagelistinfo->pages_need_release = 0;
	pagelistinfo->pages = pages;
	pagelistinfo->scatterlist = scatterlist;
	pagelistinfo->scatterlist_mapped = 0;

	if (buf) {
		/* Kernel buffer: look up each vmalloc page directly */
		unsigned long length = count;
		unsigned int off = offset;

		for (actual_pages = 0; actual_pages < num_pages;
		     actual_pages++) {
			struct page *pg =
				vmalloc_to_page((buf +
						 (actual_pages * PAGE_SIZE)));
			size_t bytes = PAGE_SIZE - off;

			if (!pg) {
				cleanup_pagelistinfo(pagelistinfo);
				return NULL;
			}

			if (bytes > length)
				bytes = length;
			pages[actual_pages] = pg;
			length -= bytes;
			off = 0;
		}
		/* do not try and release vmalloc pages */
	} else {
		/* User buffer: pin the pages for the duration of the DMA */
		actual_pages = pin_user_pages_fast((unsigned long)ubuf & PAGE_MASK, num_pages,
						   type == PAGELIST_READ, pages);

		if (actual_pages != num_pages) {
			vchiq_log_info(vchiq_arm_log_level,
				       "%s - only %d/%d pages locked",
				       __func__, actual_pages, num_pages);

			/* This is probably due to the process being killed */
			if (actual_pages > 0)
				unpin_user_pages(pages, actual_pages);
			cleanup_pagelistinfo(pagelistinfo);
			return NULL;
		}
		 /* release user pages */
		pagelistinfo->pages_need_release = 1;
	}

	/*
	 * Initialize the scatterlist so that the magic cookie
	 *  is filled if debugging is enabled
	 */
	sg_init_table(scatterlist, num_pages);
	/* Now set the pages for each scatterlist */
	for (i = 0; i < num_pages; i++)	{
		/* First entry may start mid-page; the rest start at 0 */
		unsigned int len = PAGE_SIZE - offset;

		if (len > count)
			len = count;
		sg_set_page(scatterlist + i, pages[i], len, offset);
		offset = 0;
		count -= len;
	}

	dma_buffers = dma_map_sg(g_dev,
				 scatterlist,
				 num_pages,
				 pagelistinfo->dma_dir);

	if (dma_buffers == 0) {
		cleanup_pagelistinfo(pagelistinfo);
		return NULL;
	}

	pagelistinfo->scatterlist_mapped = 1;

	/* Combine adjacent blocks for performance */
	k = 0;
	for_each_sg(scatterlist, sg, dma_buffers, i) {
		u32 len = sg_dma_len(sg);
		u32 addr = sg_dma_address(sg);

		/* Note: addrs is the address + page_count - 1
		 * The firmware expects blocks after the first to be page-
		 * aligned and a multiple of the page size
		 */
		WARN_ON(len == 0);
		WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
		WARN_ON(i && (addr & ~PAGE_MASK));
		if (k > 0 &&
		    ((addrs[k - 1] & PAGE_MASK) +
		     (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT))
		    == (addr & PAGE_MASK))
			/* Contiguous with the previous run: extend its count */
			addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
		else
			addrs[k++] = (addr & PAGE_MASK) |
				(((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
	}

	/* Partial cache lines (fragments) require special measures */
	if ((type == PAGELIST_READ) &&
	    ((pagelist->offset & (g_cache_line_size - 1)) ||
	    ((pagelist->offset + pagelist->length) &
	    (g_cache_line_size - 1)))) {
		char *fragments;

		/* May block until a fragment buffer becomes available */
		if (down_interruptible(&g_free_fragments_sema)) {
			cleanup_pagelistinfo(pagelistinfo);
			return NULL;
		}

		WARN_ON(!g_free_fragments);

		down(&g_free_fragments_mutex);
		fragments = g_free_fragments;
		WARN_ON(!fragments);
		/* Pop the free-list head (next pointer is stored in-band) */
		g_free_fragments = *(char **)g_free_fragments;
		up(&g_free_fragments_mutex);
		/* Encode the fragment index into the pagelist type field */
		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
			(fragments - g_fragments_base) / g_fragments_size;
	}

	return pagelistinfo;
}
387 
/*
 * Release a pagelist once the VPU has finished with it.  @actual is the
 * number of bytes actually transferred (may be negative on error).  For
 * reads that used fragment buffers, copy the head/tail fragment data
 * back into the first/last pages, return the fragment buffer to the
 * free list, dirty the pages of user-buffer reads, then free everything
 * via cleanup_pagelistinfo().
 */
static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
	      int actual)
{
	struct pagelist *pagelist = pagelistinfo->pagelist;
	struct page **pages = pagelistinfo->pages;
	unsigned int num_pages = pagelistinfo->num_pages;

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK, %d",
			__func__, pagelistinfo->pagelist, actual);

	/*
	 * NOTE: dma_unmap_sg must be called before the
	 * cpu can touch any of the data/pages.
	 */
	dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
		     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	pagelistinfo->scatterlist_mapped = 0;

	/* Deal with any partial cache lines (fragments) */
	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
		/* create_pagelist() encoded the fragment index in 'type' */
		char *fragments = g_fragments_base +
			(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
			g_fragments_size;
		int head_bytes, tail_bytes;

		/* Bytes before the first cache-line boundary */
		head_bytes = (g_cache_line_size - pagelist->offset) &
			(g_cache_line_size - 1);
		/* Bytes past the last full cache line */
		tail_bytes = (pagelist->offset + actual) &
			(g_cache_line_size - 1);

		/* Copy the head fragment into the first page */
		if ((actual >= 0) && (head_bytes != 0)) {
			if (head_bytes > actual)
				head_bytes = actual;

			memcpy((char *)kmap(pages[0]) +
				pagelist->offset,
				fragments,
				head_bytes);
			kunmap(pages[0]);
		}
		/* Copy the tail fragment into the last page */
		if ((actual >= 0) && (head_bytes < actual) &&
		    (tail_bytes != 0)) {
			memcpy((char *)kmap(pages[num_pages - 1]) +
				((pagelist->offset + actual) &
				(PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
				fragments + g_cache_line_size,
				tail_bytes);
			kunmap(pages[num_pages - 1]);
		}

		/* Push the fragment buffer back on the free list */
		down(&g_free_fragments_mutex);
		*(char **)fragments = g_free_fragments;
		g_free_fragments = fragments;
		up(&g_free_fragments_mutex);
		up(&g_free_fragments_sema);
	}

	/* Need to mark all the pages dirty. */
	if (pagelist->type != PAGELIST_WRITE &&
	    pagelistinfo->pages_need_release) {
		unsigned int i;

		for (i = 0; i < num_pages; i++)
			set_page_dirty(pages[i]);
	}

	cleanup_pagelistinfo(pagelistinfo);
}
457 
/*
 * Platform probe-time initialisation: allocate the shared slot and
 * fragment memory, initialise the core state, map the doorbell
 * registers, install the doorbell interrupt handler, and hand the slot
 * base address to the VideoCore firmware.  Returns 0 on success or a
 * negative errno.  Allocations use devm_*/dmam_* so they are released
 * automatically on failure or device removal.
 */
int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
{
	struct device *dev = &pdev->dev;
	struct vchiq_drvdata *drvdata = platform_get_drvdata(pdev);
	struct rpi_firmware *fw = drvdata->fw;
	struct vchiq_slot_zero *vchiq_slot_zero;
	void *slot_mem;
	dma_addr_t slot_phys;
	u32 channelbase;
	int slot_mem_size, frag_mem_size;
	int err, irq, i;

	/*
	 * VCHI messages between the CPU and firmware use
	 * 32-bit bus addresses.
	 */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (err < 0)
		return err;

	g_cache_line_size = drvdata->cache_line_size;
	g_fragments_size = 2 * g_cache_line_size;

	/* Allocate space for the channels in coherent memory */
	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);

	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
				       &slot_phys, GFP_KERNEL);
	if (!slot_mem) {
		dev_err(dev, "could not allocate DMA memory\n");
		return -ENOMEM;
	}

	WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
	if (!vchiq_slot_zero)
		return -EINVAL;

	/* Tell the VPU where the fragment pool lives (after the slots) */
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)slot_phys + slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	g_fragments_base = (char *)slot_mem + slot_mem_size;

	/* Thread the fragments into a singly-linked free list */
	g_free_fragments = g_fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(char **)&g_fragments_base[i * g_fragments_size] =
			&g_fragments_base[(i + 1) * g_fragments_size];
	}
	*(char **)&g_fragments_base[i * g_fragments_size] = NULL;
	sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

	err = vchiq_init_state(state, vchiq_slot_zero);
	if (err)
		return err;

	g_regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(g_regs))
		return PTR_ERR(g_regs);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
			       "VCHIQ doorbell", state);
	if (err) {
		dev_err(dev, "failed to register irq=%d\n", irq);
		return err;
	}

	/* Send the base address of the slots to VideoCore */
	channelbase = slot_phys;
	err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
				    &channelbase, sizeof(channelbase));
	/* The firmware zeroes channelbase on success */
	if (err || channelbase) {
		dev_err(dev, "failed to set channelbase\n");
		return err ? : -ENXIO;
	}

	g_dev = dev;
	vchiq_log_info(vchiq_arm_log_level, "vchiq_init - done (slots %pK, phys %pad)",
		       vchiq_slot_zero, &slot_phys);

	vchiq_call_connected_callbacks();

	return 0;
}
550 
551 static void
552 vchiq_arm_init_state(struct vchiq_state *state,
553 		     struct vchiq_arm_state *arm_state)
554 {
555 	if (arm_state) {
556 		rwlock_init(&arm_state->susp_res_lock);
557 
558 		init_completion(&arm_state->ka_evt);
559 		atomic_set(&arm_state->ka_use_count, 0);
560 		atomic_set(&arm_state->ka_use_ack_count, 0);
561 		atomic_set(&arm_state->ka_release_count, 0);
562 
563 		arm_state->state = state;
564 		arm_state->first_connect = 0;
565 	}
566 }
567 
568 int
569 vchiq_platform_init_state(struct vchiq_state *state)
570 {
571 	struct vchiq_2835_state *platform_state;
572 
573 	state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
574 	if (!state->platform_state)
575 		return -ENOMEM;
576 
577 	platform_state = (struct vchiq_2835_state *)state->platform_state;
578 
579 	platform_state->inited = 1;
580 	vchiq_arm_init_state(state, &platform_state->arm_state);
581 
582 	return 0;
583 }
584 
585 struct vchiq_arm_state*
586 vchiq_platform_get_arm_state(struct vchiq_state *state)
587 {
588 	struct vchiq_2835_state *platform_state;
589 
590 	platform_state   = (struct vchiq_2835_state *)state->platform_state;
591 
592 	WARN_ON_ONCE(!platform_state->inited);
593 
594 	return &platform_state->arm_state;
595 }
596 
/*
 * Signal a remote (VPU-side) event, ringing doorbell 2 to interrupt
 * the VPU if it has armed the event.  The barrier ordering here is
 * load-bearing - do not reorder these statements.
 */
void
remote_event_signal(struct remote_event *event)
{
	/*
	 * Ensure prior writes to the shared-memory slots are visible
	 * before the event is flagged as fired.
	 */
	wmb();

	event->fired = 1;

	dsb(sy);         /* data barrier operation */

	if (event->armed)
		writel(0, g_regs + BELL2); /* trigger vc interrupt */
}
609 
610 int
611 vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset,
612 			void __user *uoffset, int size, int dir)
613 {
614 	struct vchiq_pagelist_info *pagelistinfo;
615 
616 	pagelistinfo = create_pagelist(offset, uoffset, size,
617 				       (dir == VCHIQ_BULK_RECEIVE)
618 				       ? PAGELIST_READ
619 				       : PAGELIST_WRITE);
620 
621 	if (!pagelistinfo)
622 		return -ENOMEM;
623 
624 	bulk->data = pagelistinfo->dma_addr;
625 
626 	/*
627 	 * Store the pagelistinfo address in remote_data,
628 	 * which isn't used by the slave.
629 	 */
630 	bulk->remote_data = pagelistinfo;
631 
632 	return 0;
633 }
634 
635 void
636 vchiq_complete_bulk(struct vchiq_bulk *bulk)
637 {
638 	if (bulk && bulk->remote_data && bulk->actual)
639 		free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data,
640 			      bulk->actual);
641 }
642 
/*
 * Emit a one-line platform description into the dump context.
 * Returns the result of vchiq_dump().
 */
int vchiq_dump_platform_state(void *dump_context)
{
	char buf[80];
	int len;

	/*
	 * Use scnprintf(), not snprintf(): its return value is the number
	 * of characters actually written (excluding the NUL), so len + 1
	 * can never exceed sizeof(buf).  snprintf() returns the would-be
	 * length, which would let vchiq_dump() read past buf if the
	 * message ever grew beyond the buffer.
	 */
	len = scnprintf(buf, sizeof(buf), "  Platform: 2835 (VC master)");
	return vchiq_dump(dump_context, buf, len + 1);
}
651 
#define VCHIQ_INIT_RETRIES 10
/*
 * Create a vchiq_instance for a kernel-side client, waiting briefly
 * for the core state to come up first.  Returns 0 on success,
 * -ENOTCONN if the VideoCore never initialised, or -ENOMEM if the
 * instance allocation fails.
 */
int vchiq_initialise(struct vchiq_instance **instance_out)
{
	struct vchiq_state *state;
	struct vchiq_instance *instance = NULL;
	int i, ret;

	/*
	 * VideoCore may not be ready due to boot up timing.
	 * It may never be ready if kernel and firmware are mismatched, so don't
	 * block forever.
	 */
	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
		state = vchiq_get_state();
		if (state)
			break;
		usleep_range(500, 600);
	}
	if (i == VCHIQ_INIT_RETRIES) {
		vchiq_log_error(vchiq_core_log_level, "%s: videocore not initialized\n", __func__);
		ret = -ENOTCONN;
		goto failed;
	} else if (i > 0) {
		vchiq_log_warning(vchiq_core_log_level,
				  "%s: videocore initialized after %d retries\n", __func__, i);
	}

	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance) {
		vchiq_log_error(vchiq_core_log_level,
				"%s: error allocating vchiq instance\n", __func__);
		ret = -ENOMEM;
		goto failed;
	}

	instance->connected = 0;
	instance->state = state;
	mutex_init(&instance->bulk_waiter_list_mutex);
	INIT_LIST_HEAD(&instance->bulk_waiter_list);

	*instance_out = instance;

	ret = 0;

failed:
	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, ret);

	return ret;
}
EXPORT_SYMBOL(vchiq_initialise);
702 
703 void free_bulk_waiter(struct vchiq_instance *instance)
704 {
705 	struct bulk_waiter_node *waiter, *next;
706 
707 	list_for_each_entry_safe(waiter, next,
708 				 &instance->bulk_waiter_list, list) {
709 		list_del(&waiter->list);
710 		vchiq_log_info(vchiq_arm_log_level, "bulk_waiter - cleaned up %pK for pid %d",
711 			       waiter, waiter->pid);
712 		kfree(waiter);
713 	}
714 }
715 
/*
 * Tear down an instance created by vchiq_initialise(): remove all of
 * its services, free any leftover bulk waiters, and free the instance.
 * Returns VCHIQ_RETRY (instance left intact) if the state mutex
 * acquisition is interrupted by a fatal signal.
 */
enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance)
{
	enum vchiq_status status = VCHIQ_SUCCESS;
	struct vchiq_state *state = instance->state;

	if (mutex_lock_killable(&state->mutex))
		return VCHIQ_RETRY;

	/* Remove all services */
	vchiq_shutdown_internal(state, instance);

	mutex_unlock(&state->mutex);

	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);

	free_bulk_waiter(instance);
	kfree(instance);

	return status;
}
EXPORT_SYMBOL(vchiq_shutdown);
737 
/* Non-zero once vchiq_connect() has completed successfully for this instance. */
static int vchiq_is_connected(struct vchiq_instance *instance)
{
	return instance->connected;
}
742 
743 enum vchiq_status vchiq_connect(struct vchiq_instance *instance)
744 {
745 	enum vchiq_status status;
746 	struct vchiq_state *state = instance->state;
747 
748 	if (mutex_lock_killable(&state->mutex)) {
749 		vchiq_log_trace(vchiq_core_log_level, "%s: call to mutex_lock failed", __func__);
750 		status = VCHIQ_RETRY;
751 		goto failed;
752 	}
753 	status = vchiq_connect_internal(state, instance);
754 
755 	if (status == VCHIQ_SUCCESS)
756 		instance->connected = 1;
757 
758 	mutex_unlock(&state->mutex);
759 
760 failed:
761 	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
762 
763 	return status;
764 }
765 EXPORT_SYMBOL(vchiq_connect);
766 
767 static enum vchiq_status
768 vchiq_add_service(struct vchiq_instance *instance,
769 		  const struct vchiq_service_params_kernel *params,
770 		  unsigned int *phandle)
771 {
772 	enum vchiq_status status;
773 	struct vchiq_state *state = instance->state;
774 	struct vchiq_service *service = NULL;
775 	int srvstate;
776 
777 	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
778 
779 	srvstate = vchiq_is_connected(instance)
780 		? VCHIQ_SRVSTATE_LISTENING
781 		: VCHIQ_SRVSTATE_HIDDEN;
782 
783 	service = vchiq_add_service_internal(state, params, srvstate, instance, NULL);
784 
785 	if (service) {
786 		*phandle = service->handle;
787 		status = VCHIQ_SUCCESS;
788 	} else {
789 		status = VCHIQ_ERROR;
790 	}
791 
792 	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
793 
794 	return status;
795 }
796 
797 enum vchiq_status
798 vchiq_open_service(struct vchiq_instance *instance,
799 		   const struct vchiq_service_params_kernel *params,
800 		   unsigned int *phandle)
801 {
802 	enum vchiq_status   status = VCHIQ_ERROR;
803 	struct vchiq_state   *state = instance->state;
804 	struct vchiq_service *service = NULL;
805 
806 	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
807 
808 	if (!vchiq_is_connected(instance))
809 		goto failed;
810 
811 	service = vchiq_add_service_internal(state, params, VCHIQ_SRVSTATE_OPENING, instance, NULL);
812 
813 	if (service) {
814 		*phandle = service->handle;
815 		status = vchiq_open_service_internal(service, current->pid);
816 		if (status != VCHIQ_SUCCESS) {
817 			vchiq_remove_service(service->handle);
818 			*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
819 		}
820 	}
821 
822 failed:
823 	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
824 
825 	return status;
826 }
827 EXPORT_SYMBOL(vchiq_open_service);
828 
829 enum vchiq_status
830 vchiq_bulk_transmit(unsigned int handle, const void *data, unsigned int size,
831 		    void *userdata, enum vchiq_bulk_mode mode)
832 {
833 	enum vchiq_status status;
834 
835 	while (1) {
836 		switch (mode) {
837 		case VCHIQ_BULK_MODE_NOCALLBACK:
838 		case VCHIQ_BULK_MODE_CALLBACK:
839 			status = vchiq_bulk_transfer(handle,
840 						     (void *)data, NULL,
841 						     size, userdata, mode,
842 						     VCHIQ_BULK_TRANSMIT);
843 			break;
844 		case VCHIQ_BULK_MODE_BLOCKING:
845 			status = vchiq_blocking_bulk_transfer(handle, (void *)data, size,
846 							      VCHIQ_BULK_TRANSMIT);
847 			break;
848 		default:
849 			return VCHIQ_ERROR;
850 		}
851 
852 		/*
853 		 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
854 		 * to implement a retry mechanism since this function is
855 		 * supposed to block until queued
856 		 */
857 		if (status != VCHIQ_RETRY)
858 			break;
859 
860 		msleep(1);
861 	}
862 
863 	return status;
864 }
865 EXPORT_SYMBOL(vchiq_bulk_transmit);
866 
867 enum vchiq_status vchiq_bulk_receive(unsigned int handle, void *data,
868 				     unsigned int size, void *userdata,
869 				     enum vchiq_bulk_mode mode)
870 {
871 	enum vchiq_status status;
872 
873 	while (1) {
874 		switch (mode) {
875 		case VCHIQ_BULK_MODE_NOCALLBACK:
876 		case VCHIQ_BULK_MODE_CALLBACK:
877 			status = vchiq_bulk_transfer(handle, data, NULL,
878 						     size, userdata,
879 						     mode, VCHIQ_BULK_RECEIVE);
880 			break;
881 		case VCHIQ_BULK_MODE_BLOCKING:
882 			status = vchiq_blocking_bulk_transfer(handle, (void *)data, size,
883 							      VCHIQ_BULK_RECEIVE);
884 			break;
885 		default:
886 			return VCHIQ_ERROR;
887 		}
888 
889 		/*
890 		 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
891 		 * to implement a retry mechanism since this function is
892 		 * supposed to block until queued
893 		 */
894 		if (status != VCHIQ_RETRY)
895 			break;
896 
897 		msleep(1);
898 	}
899 
900 	return status;
901 }
902 EXPORT_SYMBOL(vchiq_bulk_receive);
903 
/*
 * Perform a bulk transfer in blocking mode on behalf of the calling
 * thread.  A bulk_waiter_node is parked per pid on the instance's
 * bulk_waiter_list so that a transfer interrupted by a signal
 * (VCHIQ_RETRY) can be resumed by a repeated call from the same thread.
 */
static enum vchiq_status
vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size,
			     enum vchiq_bulk_dir dir)
{
	struct vchiq_instance *instance;
	struct vchiq_service *service;
	enum vchiq_status status;
	struct bulk_waiter_node *waiter = NULL;
	bool found = false;

	service = find_service_by_handle(handle);
	if (!service)
		return VCHIQ_ERROR;

	instance = service->instance;

	/* Only needed the service to reach its instance */
	vchiq_service_put(service);

	/* Look for a waiter left behind by an interrupted earlier call */
	mutex_lock(&instance->bulk_waiter_list_mutex);
	list_for_each_entry(waiter, &instance->bulk_waiter_list, list) {
		if (waiter->pid == current->pid) {
			list_del(&waiter->list);
			found = true;
			break;
		}
	}
	mutex_unlock(&instance->bulk_waiter_list_mutex);

	if (found) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* This thread has an outstanding bulk transfer. */
			/* FIXME: why compare a dma address to a pointer? */
			if ((bulk->data != (dma_addr_t)(uintptr_t)data) || (bulk->size != size)) {
				/*
				 * This is not a retry of the previous one.
				 * Cancel the signal when the transfer completes.
				 */
				spin_lock(&bulk_waiter_spinlock);
				bulk->userdata = NULL;
				spin_unlock(&bulk_waiter_spinlock);
			}
		}
	} else {
		/* First attempt by this thread - allocate a fresh waiter */
		waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
		if (!waiter) {
			vchiq_log_error(vchiq_core_log_level, "%s - out of memory", __func__);
			return VCHIQ_ERROR;
		}
	}

	status = vchiq_bulk_transfer(handle, data, NULL, size,
				     &waiter->bulk_waiter,
				     VCHIQ_BULK_MODE_BLOCKING, dir);
	if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
		/* Finished (or unrecoverable) - the waiter is no longer needed */
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* Cancel the signal when the transfer completes. */
			spin_lock(&bulk_waiter_spinlock);
			bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		kfree(waiter);
	} else {
		/* Interrupted - park the waiter so a retry can find it */
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		vchiq_log_info(vchiq_arm_log_level, "saved bulk_waiter %pK for pid %d", waiter,
			       current->pid);
	}

	return status;
}
980 
/*
 * Append a completion record to the instance's ring for delivery to the
 * user-space client.  Blocks (interruptibly) while the ring is full.
 * Returns VCHIQ_RETRY if interrupted by a signal, VCHIQ_SUCCESS
 * otherwise - including when the instance is closing, in which case the
 * record is silently dropped.
 */
static enum vchiq_status
add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
	       struct vchiq_header *header, struct user_service *user_service,
	       void *bulk_userdata)
{
	struct vchiq_completion_data_kernel *completion;
	int insert;

	DEBUG_INITIALISE(g_state.local);

	/* insert/remove are free-running; their difference is the fill level */
	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level, "%s - completion queue full", __func__);
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (wait_for_completion_interruptible(&instance->remove_event)) {
			vchiq_log_info(vchiq_arm_log_level, "service_callback interrupted");
			return VCHIQ_RETRY;
		} else if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level, "service_callback closing");
			return VCHIQ_SUCCESS;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/*
		 * Take an extra reference, to be held until
		 * this CLOSED notification is delivered.
		 */
		vchiq_service_get(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/*
	 * A write barrier is needed here to ensure that the entire completion
	 * record is written out before the insert point.
	 */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	/* Wake any thread waiting in AWAIT_COMPLETION */
	complete(&instance->insert_event);

	return VCHIQ_SUCCESS;
}
1041 
/*
 * Callback installed on services created through the character-device
 * (userspace) interface.  Messages for "vchi" services are queued in the
 * per-service msg_queue; everything else is recorded in the instance's
 * completion ring via add_completion().
 *
 * Returns VCHIQ_SUCCESS, VCHIQ_RETRY if interrupted by a signal while
 * waiting for queue space, or VCHIQ_ERROR if the instance starts closing
 * during that wait.
 */
enum vchiq_status
service_callback(enum vchiq_reason reason, struct vchiq_header *header,
		 unsigned int handle, void *bulk_userdata)
{
	/*
	 * How do we ensure the callback goes to the right client?
	 * The service_user data points to a user_service record
	 * containing the original callback and the user state structure, which
	 * contains a circular buffer for completion records.
	 */
	struct user_service *user_service;
	struct vchiq_service *service;
	struct vchiq_instance *instance;
	bool skip_completion = false;

	DEBUG_INITIALISE(g_state.local);

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	service = handle_to_service(handle);
	if (WARN_ON(!service))
		return VCHIQ_SUCCESS;

	user_service = (struct user_service *)service->base.userdata;
	instance = user_service->instance;

	/* Events for a missing or closing instance are silently dropped. */
	if (!instance || instance->closing)
		return VCHIQ_SUCCESS;

	vchiq_log_trace(vchiq_arm_log_level,
			"%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
			__func__, (unsigned long)user_service, service->localport,
			user_service->userdata, reason, (unsigned long)header,
			(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		/* Wait (dropping the lock each time) while the queue is full. */
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level, "%s - msg queue full", __func__);
			/*
			 * If there is no MESSAGE_AVAILABLE in the completion
			 * queue, add one
			 */
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				enum vchiq_status status;

				vchiq_log_info(vchiq_arm_log_level,
					       "Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason, NULL, user_service,
							bulk_userdata);
				if (status != VCHIQ_SUCCESS) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			/* remove_event is signalled when userspace dequeues a message. */
			if (wait_for_completion_interruptible(&user_service->remove_event)) {
				vchiq_log_info(vchiq_arm_log_level, "%s interrupted", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_RETRY;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level, "%s closing", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_ERROR;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/*
		 * If there is a thread waiting in DEQUEUE_MESSAGE, or if
		 * there is a MESSAGE_AVAILABLE in the completion queue then
		 * bypass the completion queue.
		 */
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = true;
		}

		spin_unlock(&msg_queue_spinlock);
		complete(&user_service->insert_event);

		/* Header now belongs to the message queue; don't also complete it. */
		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	if (skip_completion)
		return VCHIQ_SUCCESS;

	return add_completion(instance, reason, header, user_service,
		bulk_userdata);
}
1147 
/*
 * Append @len bytes of @str to the user-supplied dump buffer described by
 * @dump_context (a struct dump_context), honouring the reader's byte
 * offset and the remaining buffer space.  A trailing NUL included in the
 * copied length marks end-of-line and is rewritten as '\n'.
 *
 * Returns 0 on success (including when output is skipped or truncated),
 * or -EFAULT if a copy to userspace fails.
 */
int vchiq_dump(void *dump_context, const char *str, int len)
{
	struct dump_context *context = (struct dump_context *)dump_context;
	int copy_bytes;

	/* User buffer already full - nothing more can be written. */
	if (context->actual >= context->space)
		return 0;

	/* Consume any remaining read offset before emitting output. */
	if (context->offset > 0) {
		int skip_bytes = min_t(int, len, context->offset);

		str += skip_bytes;
		len -= skip_bytes;
		context->offset -= skip_bytes;
		if (context->offset > 0)
			return 0;
	}
	copy_bytes = min_t(int, len, context->space - context->actual);
	if (copy_bytes == 0)
		return 0;
	if (copy_to_user(context->buf + context->actual, str,
			 copy_bytes))
		return -EFAULT;
	context->actual += copy_bytes;
	len -= copy_bytes;

	/*
	 * If the terminating NUL is included in the length, then it
	 * marks the end of a line and should be replaced with a
	 * carriage return.
	 */
	if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
		char cr = '\n';

		if (copy_to_user(context->buf + context->actual - 1,
				 &cr, 1))
			return -EFAULT;
	}
	return 0;
}
1188 
/*
 * Dump a one-line summary for every userspace vchiq instance.
 *
 * Instances are not kept on a list, so all services are scanned instead:
 * a first pass clears each instance's mark, a second pass dumps each
 * unmarked instance once and marks it.
 *
 * Returns 0 on success or a negative errno from vchiq_dump().
 */
int vchiq_dump_platform_instances(void *dump_context)
{
	struct vchiq_state *state = vchiq_get_state();
	char buf[80];
	int len;
	int i;

	/*
	 * There is no list of instances, so instead scan all services,
	 * marking those that have been dumped.
	 */

	rcu_read_lock();
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;

		service = rcu_dereference(state->services[i]);
		/* Only services owned by userspace instances use service_callback. */
		if (!service || service->base.callback != service_callback)
			continue;

		instance = service->instance;
		if (instance)
			instance->mark = 0;
	}
	rcu_read_unlock();

	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;
		int err;

		rcu_read_lock();
		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback) {
			rcu_read_unlock();
			continue;
		}

		instance = service->instance;
		if (!instance || instance->mark) {
			rcu_read_unlock();
			continue;
		}
		rcu_read_unlock();

		/*
		 * NOTE(review): instance is dereferenced after rcu_read_unlock()
		 * above; presumably instance lifetime is guaranteed by other
		 * means here - confirm against instance teardown path.
		 */
		len = snprintf(buf, sizeof(buf),
			       "Instance %pK: pid %d,%s completions %d/%d",
			       instance, instance->pid,
			       instance->connected ? " connected, " :
			       "",
			       instance->completion_insert -
			       instance->completion_remove,
			       MAX_COMPLETIONS);
		err = vchiq_dump(dump_context, buf, len + 1);
		if (err)
			return err;
		instance->mark = 1;
	}
	return 0;
}
1250 
/*
 * Dump the ARM-side (platform) state of one service: owning instance,
 * and for userspace vchi services the message-queue fill level and
 * whether a dequeue is pending.
 *
 * Returns the result of vchiq_dump() (0 or negative errno).
 */
int vchiq_dump_platform_service_state(void *dump_context,
				      struct vchiq_service *service)
{
	struct user_service *user_service =
			(struct user_service *)service->base.userdata;
	char buf[80];
	int len;

	len = scnprintf(buf, sizeof(buf), "  instance %pK", service->instance);

	if ((service->base.callback == service_callback) && user_service->is_vchi) {
		len += scnprintf(buf + len, sizeof(buf) - len, ", %d/%d messages",
				 user_service->msg_insert - user_service->msg_remove,
				 MSG_QUEUE_SIZE);

		if (user_service->dequeue_pending)
			len += scnprintf(buf + len, sizeof(buf) - len,
				" (dequeue pending)");
	}

	/* len + 1 includes the NUL, which vchiq_dump() converts to '\n'. */
	return vchiq_dump(dump_context, buf, len + 1);
}
1273 
1274 struct vchiq_state *
1275 vchiq_get_state(void)
1276 {
1277 	if (!g_state.remote)
1278 		pr_err("%s: g_state.remote == NULL\n", __func__);
1279 	else if (g_state.remote->initialised != 1)
1280 		pr_notice("%s: g_state.remote->initialised != 1 (%d)\n",
1281 			  __func__, g_state.remote->initialised);
1282 
1283 	return (g_state.remote &&
1284 		(g_state.remote->initialised == 1)) ? &g_state : NULL;
1285 }
1286 
1287 /*
1288  * Autosuspend related functionality
1289  */
1290 
1291 static enum vchiq_status
1292 vchiq_keepalive_vchiq_callback(enum vchiq_reason reason,
1293 			       struct vchiq_header *header,
1294 			       unsigned int service_user, void *bulk_user)
1295 {
1296 	vchiq_log_error(vchiq_susp_log_level, "%s callback reason %d", __func__, reason);
1297 	return 0;
1298 }
1299 
/*
 * Keepalive kernel thread, one per vchiq state (started on first connect).
 *
 * Opens its own vchiq instance with a "KEEP" service, then loops forever:
 * each time ka_evt fires it drains the ka_use/ka_release counters
 * (incremented by vchiq_on_remote_use()/vchiq_on_remote_release()) and
 * issues the corresponding number of vchiq_use_service()/
 * vchiq_release_service() calls.
 *
 * NOTE(review): the loop has no kthread_should_stop() check, so the
 * thread only exits if setup fails - confirm this is intentional.
 */
static int
vchiq_keepalive_thread_func(void *v)
{
	struct vchiq_state *state = (struct vchiq_state *)v;
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	enum vchiq_status status;
	struct vchiq_instance *instance;
	unsigned int ka_handle;
	int ret;

	struct vchiq_service_params_kernel params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
		.callback    = vchiq_keepalive_vchiq_callback,
		.version     = KEEPALIVE_VER,
		.version_min = KEEPALIVE_VER_MIN
	};

	ret = vchiq_initialise(&instance);
	if (ret) {
		vchiq_log_error(vchiq_susp_log_level, "%s vchiq_initialise failed %d", __func__,
				ret);
		goto exit;
	}

	status = vchiq_connect(instance);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level, "%s vchiq_connect failed %d", __func__,
				status);
		goto shutdown;
	}

	status = vchiq_add_service(instance, &params, &ka_handle);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level, "%s vchiq_open_service failed %d", __func__,
				status);
		goto shutdown;
	}

	while (1) {
		long rc = 0, uc = 0;

		if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
			vchiq_log_error(vchiq_susp_log_level, "%s interrupted", __func__);
			/* Discard the signal and keep servicing keepalives. */
			flush_signals(current);
			continue;
		}

		/*
		 * read and clear counters.  Do release_count then use_count to
		 * prevent getting more releases than uses
		 */
		rc = atomic_xchg(&arm_state->ka_release_count, 0);
		uc = atomic_xchg(&arm_state->ka_use_count, 0);

		/*
		 * Call use/release service the requisite number of times.
		 * Process use before release so use counts don't go negative
		 */
		while (uc--) {
			atomic_inc(&arm_state->ka_use_ack_count);
			status = vchiq_use_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
						"%s vchiq_use_service error %d", __func__, status);
			}
		}
		while (rc--) {
			status = vchiq_release_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
						"%s vchiq_release_service error %d", __func__,
						status);
			}
		}
	}

shutdown:
	vchiq_shutdown(instance);
exit:
	return 0;
}
1382 
1383 int
1384 vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
1385 		   enum USE_TYPE_E use_type)
1386 {
1387 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1388 	int ret = 0;
1389 	char entity[16];
1390 	int *entity_uc;
1391 	int local_uc;
1392 
1393 	if (!arm_state) {
1394 		ret = -EINVAL;
1395 		goto out;
1396 	}
1397 
1398 	if (use_type == USE_TYPE_VCHIQ) {
1399 		sprintf(entity, "VCHIQ:   ");
1400 		entity_uc = &arm_state->peer_use_count;
1401 	} else if (service) {
1402 		sprintf(entity, "%c%c%c%c:%03d",
1403 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1404 			service->client_id);
1405 		entity_uc = &service->service_use_count;
1406 	} else {
1407 		vchiq_log_error(vchiq_susp_log_level, "%s null service ptr", __func__);
1408 		ret = -EINVAL;
1409 		goto out;
1410 	}
1411 
1412 	write_lock_bh(&arm_state->susp_res_lock);
1413 	local_uc = ++arm_state->videocore_use_count;
1414 	++(*entity_uc);
1415 
1416 	vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
1417 			*entity_uc, local_uc);
1418 
1419 	write_unlock_bh(&arm_state->susp_res_lock);
1420 
1421 	if (!ret) {
1422 		enum vchiq_status status = VCHIQ_SUCCESS;
1423 		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
1424 
1425 		while (ack_cnt && (status == VCHIQ_SUCCESS)) {
1426 			/* Send the use notify to videocore */
1427 			status = vchiq_send_remote_use_active(state);
1428 			if (status == VCHIQ_SUCCESS)
1429 				ack_cnt--;
1430 			else
1431 				atomic_add(ack_cnt, &arm_state->ka_use_ack_count);
1432 		}
1433 	}
1434 
1435 out:
1436 	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
1437 	return ret;
1438 }
1439 
1440 int
1441 vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
1442 {
1443 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1444 	int ret = 0;
1445 	char entity[16];
1446 	int *entity_uc;
1447 
1448 	if (!arm_state) {
1449 		ret = -EINVAL;
1450 		goto out;
1451 	}
1452 
1453 	if (service) {
1454 		sprintf(entity, "%c%c%c%c:%03d",
1455 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1456 			service->client_id);
1457 		entity_uc = &service->service_use_count;
1458 	} else {
1459 		sprintf(entity, "PEER:   ");
1460 		entity_uc = &arm_state->peer_use_count;
1461 	}
1462 
1463 	write_lock_bh(&arm_state->susp_res_lock);
1464 	if (!arm_state->videocore_use_count || !(*entity_uc)) {
1465 		/* Don't use BUG_ON - don't allow user thread to crash kernel */
1466 		WARN_ON(!arm_state->videocore_use_count);
1467 		WARN_ON(!(*entity_uc));
1468 		ret = -EINVAL;
1469 		goto unlock;
1470 	}
1471 	--arm_state->videocore_use_count;
1472 	--(*entity_uc);
1473 
1474 	vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
1475 			*entity_uc, arm_state->videocore_use_count);
1476 
1477 unlock:
1478 	write_unlock_bh(&arm_state->susp_res_lock);
1479 
1480 out:
1481 	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
1482 	return ret;
1483 }
1484 
1485 void
1486 vchiq_on_remote_use(struct vchiq_state *state)
1487 {
1488 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1489 
1490 	atomic_inc(&arm_state->ka_use_count);
1491 	complete(&arm_state->ka_evt);
1492 }
1493 
1494 void
1495 vchiq_on_remote_release(struct vchiq_state *state)
1496 {
1497 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1498 
1499 	atomic_inc(&arm_state->ka_release_count);
1500 	complete(&arm_state->ka_evt);
1501 }
1502 
1503 int
1504 vchiq_use_service_internal(struct vchiq_service *service)
1505 {
1506 	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
1507 }
1508 
1509 int
1510 vchiq_release_service_internal(struct vchiq_service *service)
1511 {
1512 	return vchiq_release_internal(service->state, service);
1513 }
1514 
/* Accessor: the instance's embedded debugfs bookkeeping node. */
struct vchiq_debugfs_node *
vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
{
	return &instance->debugfs_node;
}
1520 
1521 int
1522 vchiq_instance_get_use_count(struct vchiq_instance *instance)
1523 {
1524 	struct vchiq_service *service;
1525 	int use_count = 0, i;
1526 
1527 	i = 0;
1528 	rcu_read_lock();
1529 	while ((service = __next_service_by_instance(instance->state,
1530 						     instance, &i)))
1531 		use_count += service->service_use_count;
1532 	rcu_read_unlock();
1533 	return use_count;
1534 }
1535 
/* Accessor: pid of the process that owns @instance. */
int
vchiq_instance_get_pid(struct vchiq_instance *instance)
{
	return instance->pid;
}
1541 
/* Accessor: the instance's trace flag (set via vchiq_instance_set_trace). */
int
vchiq_instance_get_trace(struct vchiq_instance *instance)
{
	return instance->trace;
}
1547 
1548 void
1549 vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
1550 {
1551 	struct vchiq_service *service;
1552 	int i;
1553 
1554 	i = 0;
1555 	rcu_read_lock();
1556 	while ((service = __next_service_by_instance(instance->state,
1557 						     instance, &i)))
1558 		service->trace = trace;
1559 	rcu_read_unlock();
1560 	instance->trace = (trace != 0);
1561 }
1562 
1563 enum vchiq_status
1564 vchiq_use_service(unsigned int handle)
1565 {
1566 	enum vchiq_status ret = VCHIQ_ERROR;
1567 	struct vchiq_service *service = find_service_by_handle(handle);
1568 
1569 	if (service) {
1570 		ret = vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
1571 		vchiq_service_put(service);
1572 	}
1573 	return ret;
1574 }
1575 EXPORT_SYMBOL(vchiq_use_service);
1576 
1577 enum vchiq_status
1578 vchiq_release_service(unsigned int handle)
1579 {
1580 	enum vchiq_status ret = VCHIQ_ERROR;
1581 	struct vchiq_service *service = find_service_by_handle(handle);
1582 
1583 	if (service) {
1584 		ret = vchiq_release_internal(service->state, service);
1585 		vchiq_service_put(service);
1586 	}
1587 	return ret;
1588 }
1589 EXPORT_SYMBOL(vchiq_release_service);
1590 
/* Per-service snapshot taken by vchiq_dump_service_use_state(). */
struct service_data_struct {
	int fourcc;	/* Service four-character code */
	int clientid;	/* Client identifier */
	int use_count;	/* service_use_count at snapshot time */
};
1596 
/*
 * Log (at warning level) the use-count state of every service, plus the
 * peer and overall videocore counts - used to diagnose what is holding
 * off suspend.  Counts are snapshotted into a temporary array under
 * susp_res_lock + RCU so the logging itself runs lock-free.
 */
void
vchiq_dump_service_use_state(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct service_data_struct *service_data;
	int i, found = 0;
	/*
	 * If there's more than 64 services, only dump ones with
	 * non-zero counts
	 */
	int only_nonzero = 0;
	static const char *nz = "<-- preventing suspend";

	int peer_count;
	int vc_use_count;
	int active_services;

	if (!arm_state)
		return;

	service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
				     GFP_KERNEL);
	if (!service_data)
		return;

	read_lock_bh(&arm_state->susp_res_lock);
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > MAX_SERVICES)
		only_nonzero = 1;

	/* Snapshot service data under RCU; log after dropping all locks. */
	rcu_read_lock();
	for (i = 0; i < active_services; i++) {
		struct vchiq_service *service_ptr =
			rcu_dereference(state->services[i]);

		if (!service_ptr)
			continue;

		if (only_nonzero && !service_ptr->service_use_count)
			continue;

		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
			continue;

		service_data[found].fourcc = service_ptr->base.fourcc;
		service_data[found].clientid = service_ptr->client_id;
		service_data[found].use_count = service_ptr->service_use_count;
		found++;
		if (found >= MAX_SERVICES)
			break;
	}
	rcu_read_unlock();

	read_unlock_bh(&arm_state->susp_res_lock);

	if (only_nonzero)
		vchiq_log_warning(vchiq_susp_log_level, "Too many active services (%d). Only dumping up to first %d services with non-zero use-count",
				  active_services, found);

	for (i = 0; i < found; i++) {
		vchiq_log_warning(vchiq_susp_log_level, "----- %c%c%c%c:%d service count %d %s",
				  VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
				  service_data[i].clientid, service_data[i].use_count,
				  service_data[i].use_count ? nz : "");
	}
	vchiq_log_warning(vchiq_susp_log_level, "----- VCHIQ use count count %d", peer_count);
	vchiq_log_warning(vchiq_susp_log_level, "--- Overall vchiq instance use count %d",
			  vc_use_count);

	kfree(service_data);
}
1670 
/*
 * Verify that @service currently holds at least one use count (i.e. it
 * is entitled to talk to the VideoCore).  On failure, log the counts and
 * dump the full per-service use state to aid debugging.
 *
 * Returns VCHIQ_SUCCESS if the service has a non-zero use count,
 * VCHIQ_ERROR otherwise (including NULL service/state).
 */
enum vchiq_status
vchiq_check_service(struct vchiq_service *service)
{
	struct vchiq_arm_state *arm_state;
	enum vchiq_status ret = VCHIQ_ERROR;

	if (!service || !service->state)
		goto out;

	arm_state = vchiq_platform_get_arm_state(service->state);

	read_lock_bh(&arm_state->susp_res_lock);
	if (service->service_use_count)
		ret = VCHIQ_SUCCESS;
	read_unlock_bh(&arm_state->susp_res_lock);

	if (ret == VCHIQ_ERROR) {
		vchiq_log_error(vchiq_susp_log_level,
				"%s ERROR - %c%c%c%c:%d service count %d, state count %d", __func__,
				VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc), service->client_id,
				service->service_use_count, arm_state->videocore_use_count);
		vchiq_dump_service_use_state(service->state);
	}
out:
	return ret;
}
1697 
/*
 * Connection-state change hook.  On the first transition into
 * VCHIQ_CONNSTATE_CONNECTED (guarded by first_connect under
 * susp_res_lock so it happens exactly once), spawn the keepalive thread
 * for this state.
 */
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
				       enum vchiq_connstate oldstate,
				       enum vchiq_connstate newstate)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	char threadname[16];

	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
		       get_conn_state_name(oldstate), get_conn_state_name(newstate));
	if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
		return;

	write_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->first_connect) {
		/* Keepalive thread already started on an earlier connect. */
		write_unlock_bh(&arm_state->susp_res_lock);
		return;
	}

	arm_state->first_connect = 1;
	write_unlock_bh(&arm_state->susp_res_lock);
	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
		 state->id);
	arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
					      (void *)state,
					      threadname);
	if (IS_ERR(arm_state->ka_thread)) {
		vchiq_log_error(vchiq_susp_log_level,
				"vchiq: FATAL: couldn't create thread %s",
				threadname);
	} else {
		wake_up_process(arm_state->ka_thread);
	}
}
1731 
/* Device-tree match table; .data supplies per-SoC driver data. */
static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_drvdata },
	{ .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_drvdata },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);
1738 
1739 static struct platform_device *
1740 vchiq_register_child(struct platform_device *pdev, const char *name)
1741 {
1742 	struct platform_device_info pdevinfo;
1743 	struct platform_device *child;
1744 
1745 	memset(&pdevinfo, 0, sizeof(pdevinfo));
1746 
1747 	pdevinfo.parent = &pdev->dev;
1748 	pdevinfo.name = name;
1749 	pdevinfo.id = PLATFORM_DEVID_NONE;
1750 	pdevinfo.dma_mask = DMA_BIT_MASK(32);
1751 
1752 	child = platform_device_register_full(&pdevinfo);
1753 	if (IS_ERR(child)) {
1754 		dev_warn(&pdev->dev, "%s not registered\n", name);
1755 		child = NULL;
1756 	}
1757 
1758 	return child;
1759 }
1760 
1761 static int vchiq_probe(struct platform_device *pdev)
1762 {
1763 	struct device_node *fw_node;
1764 	const struct of_device_id *of_id;
1765 	struct vchiq_drvdata *drvdata;
1766 	int err;
1767 
1768 	of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
1769 	drvdata = (struct vchiq_drvdata *)of_id->data;
1770 	if (!drvdata)
1771 		return -EINVAL;
1772 
1773 	fw_node = of_find_compatible_node(NULL, NULL,
1774 					  "raspberrypi,bcm2835-firmware");
1775 	if (!fw_node) {
1776 		dev_err(&pdev->dev, "Missing firmware node\n");
1777 		return -ENOENT;
1778 	}
1779 
1780 	drvdata->fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
1781 	of_node_put(fw_node);
1782 	if (!drvdata->fw)
1783 		return -EPROBE_DEFER;
1784 
1785 	platform_set_drvdata(pdev, drvdata);
1786 
1787 	err = vchiq_platform_init(pdev, &g_state);
1788 	if (err)
1789 		goto failed_platform_init;
1790 
1791 	vchiq_debugfs_init();
1792 
1793 	vchiq_log_info(vchiq_arm_log_level,
1794 		       "vchiq: platform initialised - version %d (min %d)",
1795 		       VCHIQ_VERSION, VCHIQ_VERSION_MIN);
1796 
1797 	/*
1798 	 * Simply exit on error since the function handles cleanup in
1799 	 * cases of failure.
1800 	 */
1801 	err = vchiq_register_chrdev(&pdev->dev);
1802 	if (err) {
1803 		vchiq_log_warning(vchiq_arm_log_level,
1804 				  "Failed to initialize vchiq cdev");
1805 		goto error_exit;
1806 	}
1807 
1808 	bcm2835_camera = vchiq_register_child(pdev, "bcm2835-camera");
1809 	bcm2835_audio = vchiq_register_child(pdev, "bcm2835_audio");
1810 
1811 	return 0;
1812 
1813 failed_platform_init:
1814 	vchiq_log_warning(vchiq_arm_log_level, "could not initialize vchiq platform");
1815 error_exit:
1816 	return err;
1817 }
1818 
/* Platform-driver remove: unwind probe - children, debugfs, char device. */
static int vchiq_remove(struct platform_device *pdev)
{
	platform_device_unregister(bcm2835_audio);
	platform_device_unregister(bcm2835_camera);
	vchiq_debugfs_deinit();
	vchiq_deregister_chrdev();

	return 0;
}
1828 
/* Platform-driver glue binding probe/remove to the OF match table. */
static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove = vchiq_remove,
};
1837 
1838 static int __init vchiq_driver_init(void)
1839 {
1840 	int ret;
1841 
1842 	ret = platform_driver_register(&vchiq_driver);
1843 	if (ret)
1844 		pr_err("Failed to register vchiq driver\n");
1845 
1846 	return ret;
1847 }
1848 module_init(vchiq_driver_init);
1849 
/* Module exit: per-device teardown happens in vchiq_remove(). */
static void __exit vchiq_driver_exit(void)
{
	platform_driver_unregister(&vchiq_driver);
}
module_exit(vchiq_driver_exit);
1855 
1856 MODULE_LICENSE("Dual BSD/GPL");
1857 MODULE_DESCRIPTION("Videocore VCHIQ driver");
1858 MODULE_AUTHOR("Broadcom Corporation");
1859