1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
4  * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5  */
6 
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/sched/signal.h>
10 #include <linux/types.h>
11 #include <linux/errno.h>
12 #include <linux/cdev.h>
13 #include <linux/fs.h>
14 #include <linux/device.h>
15 #include <linux/mm.h>
16 #include <linux/highmem.h>
17 #include <linux/pagemap.h>
18 #include <linux/bug.h>
19 #include <linux/completion.h>
20 #include <linux/list.h>
21 #include <linux/of.h>
22 #include <linux/platform_device.h>
23 #include <linux/compat.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/rcupdate.h>
26 #include <linux/delay.h>
27 #include <linux/slab.h>
28 #include <linux/interrupt.h>
29 #include <linux/io.h>
30 #include <linux/uaccess.h>
31 #include <soc/bcm2835/raspberrypi-firmware.h>
32 
33 #include "vchiq_core.h"
34 #include "vchiq_ioctl.h"
35 #include "vchiq_arm.h"
36 #include "vchiq_debugfs.h"
37 #include "vchiq_connected.h"
38 #include "vchiq_pagelist.h"
39 
40 #define DEVICE_NAME "vchiq"
41 
42 #define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
43 
44 #define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
45 
46 #define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
47 #define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX  1
48 
49 #define BELL0	0x00
50 #define BELL2	0x08
51 
52 #define ARM_DS_ACTIVE	BIT(2)
53 
54 /* Override the default prefix, which would be vchiq_arm (from the filename) */
55 #undef MODULE_PARAM_PREFIX
56 #define MODULE_PARAM_PREFIX DEVICE_NAME "."
57 
58 #define KEEPALIVE_VER 1
59 #define KEEPALIVE_VER_MIN KEEPALIVE_VER
60 
61 /* Run time control of log level, based on KERN_XXX level. */
int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_susp_log_level = VCHIQ_LOG_ERROR;

/* Protects the per-service VCHI message queues (see service_callback). */
DEFINE_SPINLOCK(msg_queue_spinlock);
/* The single global VCHIQ connection state for this platform. */
struct vchiq_state g_state;

/*
 * Handles for child platform devices; presumably registered once the
 * VCHIQ link is up - registration is not visible in this part of the file.
 */
static struct platform_device *bcm2835_camera;
static struct platform_device *bcm2835_audio;
70 
/* Per-SoC configuration selected via OF match data. */
struct vchiq_drvdata {
	/* L2 cache line size the VPU firmware assumes for this SoC. */
	const unsigned int cache_line_size;
	struct rpi_firmware *fw;
};

/* BCM2835 (original Pi): 32-byte VPU L2 cache lines. */
static struct vchiq_drvdata bcm2835_drvdata = {
	.cache_line_size = 32,
};

/* BCM2836 (Pi 2) and later: 64-byte cache lines. */
static struct vchiq_drvdata bcm2836_drvdata = {
	.cache_line_size = 64,
};
83 
/* ARM-side (host) companion state for a vchiq_state. */
struct vchiq_arm_state {
	/* Keepalive-related data */
	struct task_struct *ka_thread;
	/* Signalled to wake the keepalive thread when counts change. */
	struct completion ka_evt;
	atomic_t ka_use_count;
	atomic_t ka_use_ack_count;
	atomic_t ka_release_count;

	/* Guards the suspend/resume bookkeeping below. */
	rwlock_t susp_res_lock;

	/* Back-pointer to the owning core state. */
	struct vchiq_state *state;

	/*
	 * Global use count for videocore.
	 * This is equal to the sum of the use counts for all services.  When
	 * this hits zero the videocore suspend procedure will be initiated.
	 */
	int videocore_use_count;

	/*
	 * Use count to track requests from videocore peer.
	 * This use count is not associated with a service, so needs to be
	 * tracked separately with the state.
	 */
	int peer_use_count;

	/*
	 * Flag to indicate that the first vchiq connect has made it through.
	 * This means that both sides should be fully ready, and we should
	 * be able to suspend after this point.
	 */
	int first_connect;
};
117 
/*
 * Concrete layout behind the opaque state->platform_state pointer.
 * 'inited' is a sanity flag checked before the arm_state is handed out.
 */
struct vchiq_2835_state {
	int inited;
	struct vchiq_arm_state arm_state;
};
122 
/*
 * Bookkeeping for one bulk-transfer buffer: the VPU-visible pagelist, its
 * DMA mapping, and the pinned pages / scatterlist used to build it.  All of
 * it lives in a single coherent allocation (see create_pagelist).
 */
struct vchiq_pagelist_info {
	struct pagelist *pagelist;
	size_t pagelist_buffer_size;
	dma_addr_t dma_addr;
	enum dma_data_direction dma_dir;
	unsigned int num_pages;
	/* Non-zero when pages were pinned with pin_user_pages_fast(). */
	unsigned int pages_need_release;
	struct page **pages;
	struct scatterlist *scatterlist;
	/* Non-zero while the scatterlist is DMA-mapped. */
	unsigned int scatterlist_mapped;
};
134 
/* Doorbell register block, mapped in vchiq_platform_init(). */
static void __iomem *g_regs;
/* This value is the size of the L2 cache lines as understood by the
 * VPU firmware, which determines the required alignment of the
 * offsets/sizes in pagelists.
 *
 * Modern VPU firmware looks for a DT "cache-line-size" property in
 * the VCHIQ node and will overwrite it with the actual L2 cache size,
 * which the kernel must then respect.  That property was rejected
 * upstream, so we have to use the VPU firmware's compatibility value
 * of 32.
 */
static unsigned int g_cache_line_size = 32;
/* Each fragment holds a head and a tail partial cache line (2 lines). */
static unsigned int g_fragments_size;
static char *g_fragments_base;
/* Head of a singly-linked free list threaded through the fragments. */
static char *g_free_fragments;
/* Counts available fragments; callers block on it in create_pagelist(). */
static struct semaphore g_free_fragments_sema;
static struct device *g_dev;

/* Binary semaphore protecting the g_free_fragments list itself. */
static DEFINE_SEMAPHORE(g_free_fragments_mutex);

static enum vchiq_status
vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
			     unsigned int size, enum vchiq_bulk_dir dir);
158 
159 static irqreturn_t
160 vchiq_doorbell_irq(int irq, void *dev_id)
161 {
162 	struct vchiq_state *state = dev_id;
163 	irqreturn_t ret = IRQ_NONE;
164 	unsigned int status;
165 
166 	/* Read (and clear) the doorbell */
167 	status = readl(g_regs + BELL0);
168 
169 	if (status & ARM_DS_ACTIVE) {  /* Was the doorbell rung? */
170 		remote_event_pollall(state);
171 		ret = IRQ_HANDLED;
172 	}
173 
174 	return ret;
175 }
176 
/*
 * Undo whatever create_pagelist() managed to set up, in the reverse order
 * it was built: DMA-unmap the scatterlist first, then unpin any user pages,
 * then release the coherent buffer that holds everything (including
 * pagelistinfo itself - do not touch it afterwards).
 */
static void
cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
{
	if (pagelistinfo->scatterlist_mapped) {
		dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
			     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	}

	if (pagelistinfo->pages_need_release)
		unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);

	/* Frees the pagelist, pages[], scatterlist and pagelistinfo at once. */
	dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
			  pagelistinfo->pagelist, pagelistinfo->dma_addr);
}
191 
192 static inline bool
193 is_adjacent_block(u32 *addrs, u32 addr, unsigned int k)
194 {
195 	u32 tmp;
196 
197 	if (!k)
198 		return false;
199 
200 	tmp = (addrs[k - 1] & PAGE_MASK) +
201 	      (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT);
202 
203 	return tmp == (addr & PAGE_MASK);
204 }
205 
206 /* There is a potential problem with partial cache lines (pages?)
207  * at the ends of the block when reading. If the CPU accessed anything in
208  * the same line (page?) then it may have pulled old data into the cache,
209  * obscuring the new data underneath. We can solve this by transferring the
210  * partial cache lines separately, and allowing the ARM to copy into the
211  * cached area.
212  */
213 
/*
 * Build the VPU-visible pagelist plus host bookkeeping for one bulk buffer.
 *
 * Exactly one of buf (vmalloc'd kernel buffer) or ubuf (userspace buffer)
 * is non-NULL.  count is the byte length; type is PAGELIST_READ or
 * PAGELIST_WRITE (direction relative to the host).
 *
 * Returns the pagelistinfo on success or NULL on failure; every partial
 * setup is unwound through cleanup_pagelistinfo().
 */
static struct vchiq_pagelist_info *
create_pagelist(char *buf, char __user *ubuf,
		size_t count, unsigned short type)
{
	struct pagelist *pagelist;
	struct vchiq_pagelist_info *pagelistinfo;
	struct page **pages;
	u32 *addrs;
	unsigned int num_pages, offset, i, k;
	int actual_pages;
	size_t pagelist_size;
	struct scatterlist *scatterlist, *sg;
	int dma_buffers;
	dma_addr_t dma_addr;

	if (count >= INT_MAX - PAGE_SIZE)
		return NULL;

	/* Offset of the buffer within its first page. */
	if (buf)
		offset = (uintptr_t)buf & (PAGE_SIZE - 1);
	else
		offset = (uintptr_t)ubuf & (PAGE_SIZE - 1);
	num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);

	/* Reject sizes whose combined allocation below would overflow. */
	if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) -
			 sizeof(struct vchiq_pagelist_info)) /
			(sizeof(u32) + sizeof(pages[0]) +
			 sizeof(struct scatterlist)))
		return NULL;

	/* One allocation: pagelist + addrs[] + pages[] + sg[] + info. */
	pagelist_size = sizeof(struct pagelist) +
			(num_pages * sizeof(u32)) +
			(num_pages * sizeof(pages[0]) +
			(num_pages * sizeof(struct scatterlist))) +
			sizeof(struct vchiq_pagelist_info);

	/* Allocate enough storage to hold the page pointers and the page
	 * list
	 */
	pagelist = dma_alloc_coherent(g_dev, pagelist_size, &dma_addr,
				      GFP_KERNEL);

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);

	if (!pagelist)
		return NULL;

	/* Carve the single allocation into its four regions. */
	addrs		= pagelist->addrs;
	pages		= (struct page **)(addrs + num_pages);
	scatterlist	= (struct scatterlist *)(pages + num_pages);
	pagelistinfo	= (struct vchiq_pagelist_info *)
			  (scatterlist + num_pages);

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Populate the fields of the pagelistinfo structure */
	pagelistinfo->pagelist = pagelist;
	pagelistinfo->pagelist_buffer_size = pagelist_size;
	pagelistinfo->dma_addr = dma_addr;
	pagelistinfo->dma_dir =  (type == PAGELIST_WRITE) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE;
	pagelistinfo->num_pages = num_pages;
	pagelistinfo->pages_need_release = 0;
	pagelistinfo->pages = pages;
	pagelistinfo->scatterlist = scatterlist;
	pagelistinfo->scatterlist_mapped = 0;

	if (buf) {
		/* Kernel buffer: resolve each vmalloc page directly. */
		unsigned long length = count;
		unsigned int off = offset;

		for (actual_pages = 0; actual_pages < num_pages;
		     actual_pages++) {
			struct page *pg =
				vmalloc_to_page((buf +
						 (actual_pages * PAGE_SIZE)));
			size_t bytes = PAGE_SIZE - off;

			if (!pg) {
				cleanup_pagelistinfo(pagelistinfo);
				return NULL;
			}

			if (bytes > length)
				bytes = length;
			pages[actual_pages] = pg;
			length -= bytes;
			off = 0;
		}
		/* do not try and release vmalloc pages */
	} else {
		/* User buffer: pin pages (writable when the VPU writes). */
		actual_pages = pin_user_pages_fast((unsigned long)ubuf & PAGE_MASK, num_pages,
						   type == PAGELIST_READ, pages);

		if (actual_pages != num_pages) {
			vchiq_log_info(vchiq_arm_log_level,
				       "%s - only %d/%d pages locked",
				       __func__, actual_pages, num_pages);

			/* This is probably due to the process being killed */
			if (actual_pages > 0)
				unpin_user_pages(pages, actual_pages);
			cleanup_pagelistinfo(pagelistinfo);
			return NULL;
		}
		 /* release user pages */
		pagelistinfo->pages_need_release = 1;
	}

	/*
	 * Initialize the scatterlist so that the magic cookie
	 *  is filled if debugging is enabled
	 */
	sg_init_table(scatterlist, num_pages);
	/* Now set the pages for each scatterlist */
	for (i = 0; i < num_pages; i++)	{
		unsigned int len = PAGE_SIZE - offset;

		if (len > count)
			len = count;
		sg_set_page(scatterlist + i, pages[i], len, offset);
		offset = 0;
		count -= len;
	}

	dma_buffers = dma_map_sg(g_dev,
				 scatterlist,
				 num_pages,
				 pagelistinfo->dma_dir);

	if (dma_buffers == 0) {
		cleanup_pagelistinfo(pagelistinfo);
		return NULL;
	}

	pagelistinfo->scatterlist_mapped = 1;

	/* Combine adjacent blocks for performance */
	k = 0;
	for_each_sg(scatterlist, sg, dma_buffers, i) {
		u32 len = sg_dma_len(sg);
		u32 addr = sg_dma_address(sg);

		/* Note: addrs is the address + page_count - 1
		 * The firmware expects blocks after the first to be page-
		 * aligned and a multiple of the page size
		 */
		WARN_ON(len == 0);
		WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
		WARN_ON(i && (addr & ~PAGE_MASK));
		if (is_adjacent_block(addrs, addr, k))
			addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
		else
			addrs[k++] = (addr & PAGE_MASK) |
				(((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
	}

	/* Partial cache lines (fragments) require special measures */
	if ((type == PAGELIST_READ) &&
	    ((pagelist->offset & (g_cache_line_size - 1)) ||
	    ((pagelist->offset + pagelist->length) &
	    (g_cache_line_size - 1)))) {
		char *fragments;

		/* May sleep until a fragment pair is free. */
		if (down_interruptible(&g_free_fragments_sema)) {
			cleanup_pagelistinfo(pagelistinfo);
			return NULL;
		}

		WARN_ON(!g_free_fragments);

		/* Pop one fragment pair off the free list. */
		down(&g_free_fragments_mutex);
		fragments = g_free_fragments;
		WARN_ON(!fragments);
		g_free_fragments = *(char **)g_free_fragments;
		up(&g_free_fragments_mutex);
		/* Encode the fragment index into the pagelist type field. */
		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
			(fragments - g_fragments_base) / g_fragments_size;
	}

	return pagelistinfo;
}
398 
/*
 * Tear down a bulk-transfer pagelist after the transfer completes.
 * 'actual' is the number of bytes actually transferred (may be negative on
 * error, in which case the fragment copy-back is skipped).  For reads, copy
 * the head/tail partial cache lines from the fragment buffer back into the
 * user pages, return the fragment pair to the free list, dirty the pages,
 * and finally release everything via cleanup_pagelistinfo().
 */
static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
	      int actual)
{
	struct pagelist *pagelist = pagelistinfo->pagelist;
	struct page **pages = pagelistinfo->pages;
	unsigned int num_pages = pagelistinfo->num_pages;

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK, %d",
			__func__, pagelistinfo->pagelist, actual);

	/*
	 * NOTE: dma_unmap_sg must be called before the
	 * cpu can touch any of the data/pages.
	 */
	dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
		     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	pagelistinfo->scatterlist_mapped = 0;

	/* Deal with any partial cache lines (fragments) */
	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
		/* The fragment index was encoded in the type field. */
		char *fragments = g_fragments_base +
			(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
			g_fragments_size;
		int head_bytes, tail_bytes;

		head_bytes = (g_cache_line_size - pagelist->offset) &
			(g_cache_line_size - 1);
		tail_bytes = (pagelist->offset + actual) &
			(g_cache_line_size - 1);

		/* Copy the unaligned head back into the first page. */
		if ((actual >= 0) && (head_bytes != 0)) {
			if (head_bytes > actual)
				head_bytes = actual;

			memcpy((char *)kmap(pages[0]) +
				pagelist->offset,
				fragments,
				head_bytes);
			kunmap(pages[0]);
		}
		/* Copy the unaligned tail back into the last page. */
		if ((actual >= 0) && (head_bytes < actual) &&
		    (tail_bytes != 0)) {
			memcpy((char *)kmap(pages[num_pages - 1]) +
				((pagelist->offset + actual) &
				(PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
				fragments + g_cache_line_size,
				tail_bytes);
			kunmap(pages[num_pages - 1]);
		}

		/* Push the fragment pair back and wake any waiter. */
		down(&g_free_fragments_mutex);
		*(char **)fragments = g_free_fragments;
		g_free_fragments = fragments;
		up(&g_free_fragments_mutex);
		up(&g_free_fragments_sema);
	}

	/* Need to mark all the pages dirty. */
	if (pagelist->type != PAGELIST_WRITE &&
	    pagelistinfo->pages_need_release) {
		unsigned int i;

		for (i = 0; i < num_pages; i++)
			set_page_dirty(pages[i]);
	}

	cleanup_pagelistinfo(pagelistinfo);
}
468 
/*
 * Platform probe-time initialisation: allocate the shared slot memory and
 * fragment pool, initialise the core state, map the doorbell registers,
 * request the doorbell IRQ, and hand the slot base address to the VideoCore
 * firmware.  Returns 0 on success or a negative errno.
 */
int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
{
	struct device *dev = &pdev->dev;
	struct vchiq_drvdata *drvdata = platform_get_drvdata(pdev);
	struct rpi_firmware *fw = drvdata->fw;
	struct vchiq_slot_zero *vchiq_slot_zero;
	void *slot_mem;
	dma_addr_t slot_phys;
	u32 channelbase;
	int slot_mem_size, frag_mem_size;
	int err, irq, i;

	/*
	 * VCHI messages between the CPU and firmware use
	 * 32-bit bus addresses.
	 */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (err < 0)
		return err;

	g_cache_line_size = drvdata->cache_line_size;
	g_fragments_size = 2 * g_cache_line_size;

	/* Allocate space for the channels in coherent memory */
	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);

	/* Device-managed: freed automatically on probe failure/removal. */
	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
				       &slot_phys, GFP_KERNEL);
	if (!slot_mem) {
		dev_err(dev, "could not allocate DMA memory\n");
		return -ENOMEM;
	}

	WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
	if (!vchiq_slot_zero)
		return -EINVAL;

	/* Tell the VPU where the fragment pool lives (after the slots). */
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)slot_phys + slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	g_fragments_base = (char *)slot_mem + slot_mem_size;

	/* Thread the free list through the fragment pool. */
	g_free_fragments = g_fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(char **)&g_fragments_base[i * g_fragments_size] =
			&g_fragments_base[(i + 1) * g_fragments_size];
	}
	*(char **)&g_fragments_base[i * g_fragments_size] = NULL;
	sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

	err = vchiq_init_state(state, vchiq_slot_zero);
	if (err)
		return err;

	g_regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(g_regs))
		return PTR_ERR(g_regs);

	/* NOTE(review): irq == 0 would be returned as "success" here;
	 * platform_get_irq() should not return 0, but worth confirming. */
	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
			       "VCHIQ doorbell", state);
	if (err) {
		dev_err(dev, "failed to register irq=%d\n", irq);
		return err;
	}

	/* Send the base address of the slots to VideoCore */
	channelbase = slot_phys;
	err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
				    &channelbase, sizeof(channelbase));
	/* The firmware zeroes channelbase on success. */
	if (err || channelbase) {
		dev_err(dev, "failed to set channelbase\n");
		return err ? : -ENXIO;
	}

	g_dev = dev;
	vchiq_log_info(vchiq_arm_log_level, "vchiq_init - done (slots %pK, phys %pad)",
		       vchiq_slot_zero, &slot_phys);

	vchiq_call_connected_callbacks();

	return 0;
}
561 
562 static void
563 vchiq_arm_init_state(struct vchiq_state *state,
564 		     struct vchiq_arm_state *arm_state)
565 {
566 	if (arm_state) {
567 		rwlock_init(&arm_state->susp_res_lock);
568 
569 		init_completion(&arm_state->ka_evt);
570 		atomic_set(&arm_state->ka_use_count, 0);
571 		atomic_set(&arm_state->ka_use_ack_count, 0);
572 		atomic_set(&arm_state->ka_release_count, 0);
573 
574 		arm_state->state = state;
575 		arm_state->first_connect = 0;
576 	}
577 }
578 
579 int
580 vchiq_platform_init_state(struct vchiq_state *state)
581 {
582 	struct vchiq_2835_state *platform_state;
583 
584 	state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
585 	if (!state->platform_state)
586 		return -ENOMEM;
587 
588 	platform_state = (struct vchiq_2835_state *)state->platform_state;
589 
590 	platform_state->inited = 1;
591 	vchiq_arm_init_state(state, &platform_state->arm_state);
592 
593 	return 0;
594 }
595 
596 static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
597 {
598 	struct vchiq_2835_state *platform_state;
599 
600 	platform_state   = (struct vchiq_2835_state *)state->platform_state;
601 
602 	WARN_ON_ONCE(!platform_state->inited);
603 
604 	return &platform_state->arm_state;
605 }
606 
/*
 * Signal a remote (VPU-side) event.  The write barrier orders any prior
 * shared-memory writes before 'fired' becomes visible; the dsb ensures the
 * flag has reached memory before the doorbell write can trigger the VPU.
 * The doorbell is only rung while the peer has the event armed.
 */
void
remote_event_signal(struct remote_event *event)
{
	wmb();

	event->fired = 1;

	dsb(sy);         /* data barrier operation */

	if (event->armed)
		writel(0, g_regs + BELL2); /* trigger vc interrupt */
}
619 
620 int
621 vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset,
622 			void __user *uoffset, int size, int dir)
623 {
624 	struct vchiq_pagelist_info *pagelistinfo;
625 
626 	pagelistinfo = create_pagelist(offset, uoffset, size,
627 				       (dir == VCHIQ_BULK_RECEIVE)
628 				       ? PAGELIST_READ
629 				       : PAGELIST_WRITE);
630 
631 	if (!pagelistinfo)
632 		return -ENOMEM;
633 
634 	bulk->data = pagelistinfo->dma_addr;
635 
636 	/*
637 	 * Store the pagelistinfo address in remote_data,
638 	 * which isn't used by the slave.
639 	 */
640 	bulk->remote_data = pagelistinfo;
641 
642 	return 0;
643 }
644 
645 void
646 vchiq_complete_bulk(struct vchiq_bulk *bulk)
647 {
648 	if (bulk && bulk->remote_data && bulk->actual)
649 		free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data,
650 			      bulk->actual);
651 }
652 
/* Emit a one-line platform description into the debug dump. */
int vchiq_dump_platform_state(void *dump_context)
{
	char line[80];
	int n;

	/* The length passed to vchiq_dump includes the NUL terminator. */
	n = snprintf(line, sizeof(line), "  Platform: 2835 (VC master)");
	return vchiq_dump(dump_context, line, n + 1);
}
661 
662 #define VCHIQ_INIT_RETRIES 10
663 int vchiq_initialise(struct vchiq_instance **instance_out)
664 {
665 	struct vchiq_state *state;
666 	struct vchiq_instance *instance = NULL;
667 	int i, ret;
668 
669 	/*
670 	 * VideoCore may not be ready due to boot up timing.
671 	 * It may never be ready if kernel and firmware are mismatched,so don't
672 	 * block forever.
673 	 */
674 	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
675 		state = vchiq_get_state();
676 		if (state)
677 			break;
678 		usleep_range(500, 600);
679 	}
680 	if (i == VCHIQ_INIT_RETRIES) {
681 		vchiq_log_error(vchiq_core_log_level, "%s: videocore not initialized\n", __func__);
682 		ret = -ENOTCONN;
683 		goto failed;
684 	} else if (i > 0) {
685 		vchiq_log_warning(vchiq_core_log_level,
686 				  "%s: videocore initialized after %d retries\n", __func__, i);
687 	}
688 
689 	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
690 	if (!instance) {
691 		vchiq_log_error(vchiq_core_log_level,
692 				"%s: error allocating vchiq instance\n", __func__);
693 		ret = -ENOMEM;
694 		goto failed;
695 	}
696 
697 	instance->connected = 0;
698 	instance->state = state;
699 	mutex_init(&instance->bulk_waiter_list_mutex);
700 	INIT_LIST_HEAD(&instance->bulk_waiter_list);
701 
702 	*instance_out = instance;
703 
704 	ret = 0;
705 
706 failed:
707 	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, ret);
708 
709 	return ret;
710 }
711 EXPORT_SYMBOL(vchiq_initialise);
712 
713 void free_bulk_waiter(struct vchiq_instance *instance)
714 {
715 	struct bulk_waiter_node *waiter, *next;
716 
717 	list_for_each_entry_safe(waiter, next,
718 				 &instance->bulk_waiter_list, list) {
719 		list_del(&waiter->list);
720 		vchiq_log_info(vchiq_arm_log_level, "bulk_waiter - cleaned up %pK for pid %d",
721 			       waiter, waiter->pid);
722 		kfree(waiter);
723 	}
724 }
725 
726 enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance)
727 {
728 	enum vchiq_status status = VCHIQ_SUCCESS;
729 	struct vchiq_state *state = instance->state;
730 
731 	if (mutex_lock_killable(&state->mutex))
732 		return VCHIQ_RETRY;
733 
734 	/* Remove all services */
735 	vchiq_shutdown_internal(state, instance);
736 
737 	mutex_unlock(&state->mutex);
738 
739 	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
740 
741 	free_bulk_waiter(instance);
742 	kfree(instance);
743 
744 	return status;
745 }
746 EXPORT_SYMBOL(vchiq_shutdown);
747 
/* Non-zero once vchiq_connect() has succeeded for this instance. */
static int vchiq_is_connected(struct vchiq_instance *instance)
{
	return instance->connected;
}
752 
753 enum vchiq_status vchiq_connect(struct vchiq_instance *instance)
754 {
755 	enum vchiq_status status;
756 	struct vchiq_state *state = instance->state;
757 
758 	if (mutex_lock_killable(&state->mutex)) {
759 		vchiq_log_trace(vchiq_core_log_level, "%s: call to mutex_lock failed", __func__);
760 		status = VCHIQ_RETRY;
761 		goto failed;
762 	}
763 	status = vchiq_connect_internal(state, instance);
764 
765 	if (status == VCHIQ_SUCCESS)
766 		instance->connected = 1;
767 
768 	mutex_unlock(&state->mutex);
769 
770 failed:
771 	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
772 
773 	return status;
774 }
775 EXPORT_SYMBOL(vchiq_connect);
776 
777 static enum vchiq_status
778 vchiq_add_service(struct vchiq_instance *instance,
779 		  const struct vchiq_service_params_kernel *params,
780 		  unsigned int *phandle)
781 {
782 	enum vchiq_status status;
783 	struct vchiq_state *state = instance->state;
784 	struct vchiq_service *service = NULL;
785 	int srvstate;
786 
787 	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
788 
789 	srvstate = vchiq_is_connected(instance)
790 		? VCHIQ_SRVSTATE_LISTENING
791 		: VCHIQ_SRVSTATE_HIDDEN;
792 
793 	service = vchiq_add_service_internal(state, params, srvstate, instance, NULL);
794 
795 	if (service) {
796 		*phandle = service->handle;
797 		status = VCHIQ_SUCCESS;
798 	} else {
799 		status = VCHIQ_ERROR;
800 	}
801 
802 	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
803 
804 	return status;
805 }
806 
807 enum vchiq_status
808 vchiq_open_service(struct vchiq_instance *instance,
809 		   const struct vchiq_service_params_kernel *params,
810 		   unsigned int *phandle)
811 {
812 	enum vchiq_status   status = VCHIQ_ERROR;
813 	struct vchiq_state   *state = instance->state;
814 	struct vchiq_service *service = NULL;
815 
816 	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
817 
818 	if (!vchiq_is_connected(instance))
819 		goto failed;
820 
821 	service = vchiq_add_service_internal(state, params, VCHIQ_SRVSTATE_OPENING, instance, NULL);
822 
823 	if (service) {
824 		*phandle = service->handle;
825 		status = vchiq_open_service_internal(service, current->pid);
826 		if (status != VCHIQ_SUCCESS) {
827 			vchiq_remove_service(service->handle);
828 			*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
829 		}
830 	}
831 
832 failed:
833 	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
834 
835 	return status;
836 }
837 EXPORT_SYMBOL(vchiq_open_service);
838 
839 enum vchiq_status
840 vchiq_bulk_transmit(unsigned int handle, const void *data, unsigned int size,
841 		    void *userdata, enum vchiq_bulk_mode mode)
842 {
843 	enum vchiq_status status;
844 
845 	while (1) {
846 		switch (mode) {
847 		case VCHIQ_BULK_MODE_NOCALLBACK:
848 		case VCHIQ_BULK_MODE_CALLBACK:
849 			status = vchiq_bulk_transfer(handle,
850 						     (void *)data, NULL,
851 						     size, userdata, mode,
852 						     VCHIQ_BULK_TRANSMIT);
853 			break;
854 		case VCHIQ_BULK_MODE_BLOCKING:
855 			status = vchiq_blocking_bulk_transfer(handle, (void *)data, size,
856 							      VCHIQ_BULK_TRANSMIT);
857 			break;
858 		default:
859 			return VCHIQ_ERROR;
860 		}
861 
862 		/*
863 		 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
864 		 * to implement a retry mechanism since this function is
865 		 * supposed to block until queued
866 		 */
867 		if (status != VCHIQ_RETRY)
868 			break;
869 
870 		msleep(1);
871 	}
872 
873 	return status;
874 }
875 EXPORT_SYMBOL(vchiq_bulk_transmit);
876 
877 enum vchiq_status vchiq_bulk_receive(unsigned int handle, void *data,
878 				     unsigned int size, void *userdata,
879 				     enum vchiq_bulk_mode mode)
880 {
881 	enum vchiq_status status;
882 
883 	while (1) {
884 		switch (mode) {
885 		case VCHIQ_BULK_MODE_NOCALLBACK:
886 		case VCHIQ_BULK_MODE_CALLBACK:
887 			status = vchiq_bulk_transfer(handle, data, NULL,
888 						     size, userdata,
889 						     mode, VCHIQ_BULK_RECEIVE);
890 			break;
891 		case VCHIQ_BULK_MODE_BLOCKING:
892 			status = vchiq_blocking_bulk_transfer(handle, (void *)data, size,
893 							      VCHIQ_BULK_RECEIVE);
894 			break;
895 		default:
896 			return VCHIQ_ERROR;
897 		}
898 
899 		/*
900 		 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
901 		 * to implement a retry mechanism since this function is
902 		 * supposed to block until queued
903 		 */
904 		if (status != VCHIQ_RETRY)
905 			break;
906 
907 		msleep(1);
908 	}
909 
910 	return status;
911 }
912 EXPORT_SYMBOL(vchiq_bulk_receive);
913 
/*
 * Perform a blocking bulk transfer.  Each thread (keyed by pid) may have a
 * cached bulk_waiter_node on the instance from a previous VCHIQ_RETRY; it is
 * reused so a retried transfer can pick up its still-outstanding bulk.  On
 * completion/failure the waiter is freed; on retry it is re-queued on the
 * instance's list for the next attempt by the same thread.
 */
static enum vchiq_status
vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size,
			     enum vchiq_bulk_dir dir)
{
	struct vchiq_instance *instance;
	struct vchiq_service *service;
	enum vchiq_status status;
	struct bulk_waiter_node *waiter = NULL;
	bool found = false;

	service = find_service_by_handle(handle);
	if (!service)
		return VCHIQ_ERROR;

	instance = service->instance;

	/* Only needed the service to resolve the instance. */
	vchiq_service_put(service);

	/* Look for a waiter left behind by an earlier retry of this thread. */
	mutex_lock(&instance->bulk_waiter_list_mutex);
	list_for_each_entry(waiter, &instance->bulk_waiter_list, list) {
		if (waiter->pid == current->pid) {
			list_del(&waiter->list);
			found = true;
			break;
		}
	}
	mutex_unlock(&instance->bulk_waiter_list_mutex);

	if (found) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* This thread has an outstanding bulk transfer. */
			/* FIXME: why compare a dma address to a pointer? */
			if ((bulk->data != (dma_addr_t)(uintptr_t)data) || (bulk->size != size)) {
				/*
				 * This is not a retry of the previous one.
				 * Cancel the signal when the transfer completes.
				 */
				spin_lock(&bulk_waiter_spinlock);
				bulk->userdata = NULL;
				spin_unlock(&bulk_waiter_spinlock);
			}
		}
	} else {
		waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
		if (!waiter) {
			vchiq_log_error(vchiq_core_log_level, "%s - out of memory", __func__);
			return VCHIQ_ERROR;
		}
	}

	status = vchiq_bulk_transfer(handle, data, NULL, size,
				     &waiter->bulk_waiter,
				     VCHIQ_BULK_MODE_BLOCKING, dir);
	if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* Cancel the signal when the transfer completes. */
			spin_lock(&bulk_waiter_spinlock);
			bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		kfree(waiter);
	} else {
		/* Retry pending: park the waiter for this pid's next attempt. */
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		vchiq_log_info(vchiq_arm_log_level, "saved bulk_waiter %pK for pid %d", waiter,
			       current->pid);
	}

	return status;
}
990 
/*
 * Append a completion record to the instance's ring for delivery to
 * userspace.  Blocks (interruptibly) while the ring is full.  The insert and
 * remove counters are free-running; the slot index is masked with
 * MAX_COMPLETIONS - 1.  Returns VCHIQ_RETRY if interrupted, otherwise
 * VCHIQ_SUCCESS (including the instance-closing case, which drops silently).
 */
static enum vchiq_status
add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
	       struct vchiq_header *header, struct user_service *user_service,
	       void *bulk_userdata)
{
	struct vchiq_completion_data_kernel *completion;
	int insert;

	DEBUG_INITIALISE(g_state.local);

	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level, "%s - completion queue full", __func__);
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (wait_for_completion_interruptible(&instance->remove_event)) {
			vchiq_log_info(vchiq_arm_log_level, "service_callback interrupted");
			return VCHIQ_RETRY;
		} else if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level, "service_callback closing");
			return VCHIQ_SUCCESS;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/*
		 * Take an extra reference, to be held until
		 * this CLOSED notification is delivered.
		 */
		vchiq_service_get(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/*
	 * A write barrier is needed here to ensure that the entire completion
	 * record is written out before the insert point.
	 */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	/* Wake any reader blocked in AWAIT_COMPLETION. */
	complete(&instance->insert_event);

	return VCHIQ_SUCCESS;
}
1051 
enum vchiq_status
service_callback(enum vchiq_reason reason, struct vchiq_header *header,
		 unsigned int handle, void *bulk_userdata)
{
	/*
	 * How do we ensure the callback goes to the right client?
	 * The service_user data points to a user_service record
	 * containing the original callback and the user state structure, which
	 * contains a circular buffer for completion records.
	 */
	struct user_service *user_service;
	struct vchiq_service *service;
	struct vchiq_instance *instance;
	bool skip_completion = false;

	DEBUG_INITIALISE(g_state.local);

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	/* Resolve the handle under RCU; the service may already be gone. */
	rcu_read_lock();
	service = handle_to_service(handle);
	if (WARN_ON(!service)) {
		rcu_read_unlock();
		return VCHIQ_SUCCESS;
	}

	user_service = (struct user_service *)service->base.userdata;
	instance = user_service->instance;

	/* Drop callbacks silently once the owning instance is closing. */
	if (!instance || instance->closing) {
		rcu_read_unlock();
		return VCHIQ_SUCCESS;
	}

	/*
	 * As hopping around different synchronization mechanism,
	 * taking an extra reference results in simpler implementation.
	 */
	vchiq_service_get(service);
	rcu_read_unlock();

	vchiq_log_trace(vchiq_arm_log_level,
			"%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
			__func__, (unsigned long)user_service, service->localport,
			user_service->userdata, reason, (unsigned long)header,
			(unsigned long)instance, (unsigned long)bulk_userdata);

	/*
	 * VCHI services queue incoming message headers into the per-service
	 * msg_queue ring rather than delivering them via the completion queue.
	 */
	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			/* Ring is full - drop the lock and wait for the reader. */
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level, "%s - msg queue full", __func__);
			/*
			 * If there is no MESSAGE_AVAILABLE in the completion
			 * queue, add one
			 */
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				enum vchiq_status status;

				vchiq_log_info(vchiq_arm_log_level,
					       "Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason, NULL, user_service,
							bulk_userdata);
				if (status != VCHIQ_SUCCESS) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					vchiq_service_put(service);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			if (wait_for_completion_interruptible(&user_service->remove_event)) {
				vchiq_log_info(vchiq_arm_log_level, "%s interrupted", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				vchiq_service_put(service);
				return VCHIQ_RETRY;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level, "%s closing", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				vchiq_service_put(service);
				return VCHIQ_ERROR;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/*
		 * If there is a thread waiting in DEQUEUE_MESSAGE, or if
		 * there is a MESSAGE_AVAILABLE in the completion queue then
		 * bypass the completion queue.
		 */
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = true;
		}

		spin_unlock(&msg_queue_spinlock);
		complete(&user_service->insert_event);

		/* Header now lives in msg_queue; don't also pass it below. */
		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	vchiq_service_put(service);

	if (skip_completion)
		return VCHIQ_SUCCESS;

	return add_completion(instance, reason, header, user_service,
		bulk_userdata);
}
1173 
/*
 * Copy a chunk of dump text into the user-space buffer described by
 * dump_context (a struct dump_context: user buffer, capacity, bytes
 * written so far, and an initial skip offset).
 *
 * Returns 0 on success - including when output is silently dropped
 * because the skip offset is not yet consumed or the buffer is full -
 * or -EFAULT if copy_to_user() fails.
 */
int vchiq_dump(void *dump_context, const char *str, int len)
{
	struct dump_context *context = (struct dump_context *)dump_context;
	int copy_bytes;

	/* User buffer already full - discard the remainder of the dump. */
	if (context->actual >= context->space)
		return 0;

	/* Consume the caller-requested skip offset before emitting. */
	if (context->offset > 0) {
		int skip_bytes = min_t(int, len, context->offset);

		str += skip_bytes;
		len -= skip_bytes;
		context->offset -= skip_bytes;
		if (context->offset > 0)
			return 0;
	}
	/* Clamp to the space left in the user buffer. */
	copy_bytes = min_t(int, len, context->space - context->actual);
	if (copy_bytes == 0)
		return 0;
	if (copy_to_user(context->buf + context->actual, str,
			 copy_bytes))
		return -EFAULT;
	context->actual += copy_bytes;
	len -= copy_bytes;

	/*
	 * If the terminating NUL is included in the length, then it
	 * marks the end of a line and should be replaced with a
	 * carriage return.
	 */
	if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
		char cr = '\n';

		if (copy_to_user(context->buf + context->actual - 1,
				 &cr, 1))
			return -EFAULT;
	}
	return 0;
}
1214 
/*
 * Dump a one-line summary of every instance that owns services
 * registered through the userspace service_callback.
 *
 * Returns 0 on success, -ENOTCONN if VCHIQ is not initialised, or an
 * error propagated from vchiq_dump().
 */
int vchiq_dump_platform_instances(void *dump_context)
{
	struct vchiq_state *state = vchiq_get_state();
	char buf[80];
	int len;
	int i;

	if (!state)
		return -ENOTCONN;

	/*
	 * There is no list of instances, so instead scan all services,
	 * marking those that have been dumped.
	 */

	/* First pass: clear the "already dumped" mark on each instance. */
	rcu_read_lock();
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;

		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback)
			continue;

		instance = service->instance;
		if (instance)
			instance->mark = 0;
	}
	rcu_read_unlock();

	/* Second pass: dump each not-yet-marked instance exactly once. */
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;
		int err;

		rcu_read_lock();
		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback) {
			rcu_read_unlock();
			continue;
		}

		instance = service->instance;
		if (!instance || instance->mark) {
			rcu_read_unlock();
			continue;
		}
		rcu_read_unlock();

		/*
		 * NOTE(review): instance is dereferenced (and ->mark written)
		 * after rcu_read_unlock() without taking a reference; this
		 * assumes the instance cannot be freed while the dump runs -
		 * confirm against instance lifetime rules.
		 */
		len = snprintf(buf, sizeof(buf),
			       "Instance %pK: pid %d,%s completions %d/%d",
			       instance, instance->pid,
			       instance->connected ? " connected, " :
			       "",
			       instance->completion_insert -
			       instance->completion_remove,
			       MAX_COMPLETIONS);
		err = vchiq_dump(dump_context, buf, len + 1);
		if (err)
			return err;
		instance->mark = 1;
	}
	return 0;
}
1279 
1280 int vchiq_dump_platform_service_state(void *dump_context,
1281 				      struct vchiq_service *service)
1282 {
1283 	struct user_service *user_service =
1284 			(struct user_service *)service->base.userdata;
1285 	char buf[80];
1286 	int len;
1287 
1288 	len = scnprintf(buf, sizeof(buf), "  instance %pK", service->instance);
1289 
1290 	if ((service->base.callback == service_callback) && user_service->is_vchi) {
1291 		len += scnprintf(buf + len, sizeof(buf) - len, ", %d/%d messages",
1292 				 user_service->msg_insert - user_service->msg_remove,
1293 				 MSG_QUEUE_SIZE);
1294 
1295 		if (user_service->dequeue_pending)
1296 			len += scnprintf(buf + len, sizeof(buf) - len,
1297 				" (dequeue pending)");
1298 	}
1299 
1300 	return vchiq_dump(dump_context, buf, len + 1);
1301 }
1302 
1303 struct vchiq_state *
1304 vchiq_get_state(void)
1305 {
1306 	if (!g_state.remote) {
1307 		pr_err("%s: g_state.remote == NULL\n", __func__);
1308 		return NULL;
1309 	}
1310 
1311 	if (g_state.remote->initialised != 1) {
1312 		pr_notice("%s: g_state.remote->initialised != 1 (%d)\n",
1313 			  __func__, g_state.remote->initialised);
1314 		return NULL;
1315 	}
1316 
1317 	return &g_state;
1318 }
1319 
1320 /*
1321  * Autosuspend related functionality
1322  */
1323 
1324 static enum vchiq_status
1325 vchiq_keepalive_vchiq_callback(enum vchiq_reason reason,
1326 			       struct vchiq_header *header,
1327 			       unsigned int service_user, void *bulk_user)
1328 {
1329 	vchiq_log_error(vchiq_susp_log_level, "%s callback reason %d", __func__, reason);
1330 	return 0;
1331 }
1332 
/*
 * Keep-alive worker thread. Opens a private VCHIQ instance with a
 * "KEEP" service, then loops forever converting the use/release counts
 * accumulated by vchiq_on_remote_use()/vchiq_on_remote_release() into
 * vchiq_use_service()/vchiq_release_service() calls.
 */
static int
vchiq_keepalive_thread_func(void *v)
{
	struct vchiq_state *state = (struct vchiq_state *)v;
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	enum vchiq_status status;
	struct vchiq_instance *instance;
	unsigned int ka_handle;
	int ret;

	struct vchiq_service_params_kernel params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
		.callback    = vchiq_keepalive_vchiq_callback,
		.version     = KEEPALIVE_VER,
		.version_min = KEEPALIVE_VER_MIN
	};

	ret = vchiq_initialise(&instance);
	if (ret) {
		vchiq_log_error(vchiq_susp_log_level, "%s vchiq_initialise failed %d", __func__,
				ret);
		goto exit;
	}

	status = vchiq_connect(instance);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level, "%s vchiq_connect failed %d", __func__,
				status);
		goto shutdown;
	}

	status = vchiq_add_service(instance, &params, &ka_handle);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level, "%s vchiq_open_service failed %d", __func__,
				status);
		goto shutdown;
	}

	while (1) {
		long rc = 0, uc = 0;

		/* Wake on ka_evt, signalled by the remote-use/release hooks. */
		if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
			vchiq_log_error(vchiq_susp_log_level, "%s interrupted", __func__);
			flush_signals(current);
			continue;
		}

		/*
		 * read and clear counters.  Do release_count then use_count to
		 * prevent getting more releases than uses
		 */
		rc = atomic_xchg(&arm_state->ka_release_count, 0);
		uc = atomic_xchg(&arm_state->ka_use_count, 0);

		/*
		 * Call use/release service the requisite number of times.
		 * Process use before release so use counts don't go negative
		 */
		while (uc--) {
			atomic_inc(&arm_state->ka_use_ack_count);
			status = vchiq_use_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
						"%s vchiq_use_service error %d", __func__, status);
			}
		}
		while (rc--) {
			status = vchiq_release_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
						"%s vchiq_release_service error %d", __func__,
						status);
			}
		}
	}

shutdown:
	vchiq_shutdown(instance);
exit:
	return 0;
}
1415 
1416 int
1417 vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
1418 		   enum USE_TYPE_E use_type)
1419 {
1420 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1421 	int ret = 0;
1422 	char entity[16];
1423 	int *entity_uc;
1424 	int local_uc;
1425 
1426 	if (!arm_state) {
1427 		ret = -EINVAL;
1428 		goto out;
1429 	}
1430 
1431 	if (use_type == USE_TYPE_VCHIQ) {
1432 		sprintf(entity, "VCHIQ:   ");
1433 		entity_uc = &arm_state->peer_use_count;
1434 	} else if (service) {
1435 		sprintf(entity, "%c%c%c%c:%03d",
1436 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1437 			service->client_id);
1438 		entity_uc = &service->service_use_count;
1439 	} else {
1440 		vchiq_log_error(vchiq_susp_log_level, "%s null service ptr", __func__);
1441 		ret = -EINVAL;
1442 		goto out;
1443 	}
1444 
1445 	write_lock_bh(&arm_state->susp_res_lock);
1446 	local_uc = ++arm_state->videocore_use_count;
1447 	++(*entity_uc);
1448 
1449 	vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
1450 			*entity_uc, local_uc);
1451 
1452 	write_unlock_bh(&arm_state->susp_res_lock);
1453 
1454 	if (!ret) {
1455 		enum vchiq_status status = VCHIQ_SUCCESS;
1456 		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
1457 
1458 		while (ack_cnt && (status == VCHIQ_SUCCESS)) {
1459 			/* Send the use notify to videocore */
1460 			status = vchiq_send_remote_use_active(state);
1461 			if (status == VCHIQ_SUCCESS)
1462 				ack_cnt--;
1463 			else
1464 				atomic_add(ack_cnt, &arm_state->ka_use_ack_count);
1465 		}
1466 	}
1467 
1468 out:
1469 	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
1470 	return ret;
1471 }
1472 
1473 int
1474 vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
1475 {
1476 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1477 	int ret = 0;
1478 	char entity[16];
1479 	int *entity_uc;
1480 
1481 	if (!arm_state) {
1482 		ret = -EINVAL;
1483 		goto out;
1484 	}
1485 
1486 	if (service) {
1487 		sprintf(entity, "%c%c%c%c:%03d",
1488 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1489 			service->client_id);
1490 		entity_uc = &service->service_use_count;
1491 	} else {
1492 		sprintf(entity, "PEER:   ");
1493 		entity_uc = &arm_state->peer_use_count;
1494 	}
1495 
1496 	write_lock_bh(&arm_state->susp_res_lock);
1497 	if (!arm_state->videocore_use_count || !(*entity_uc)) {
1498 		/* Don't use BUG_ON - don't allow user thread to crash kernel */
1499 		WARN_ON(!arm_state->videocore_use_count);
1500 		WARN_ON(!(*entity_uc));
1501 		ret = -EINVAL;
1502 		goto unlock;
1503 	}
1504 	--arm_state->videocore_use_count;
1505 	--(*entity_uc);
1506 
1507 	vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
1508 			*entity_uc, arm_state->videocore_use_count);
1509 
1510 unlock:
1511 	write_unlock_bh(&arm_state->susp_res_lock);
1512 
1513 out:
1514 	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
1515 	return ret;
1516 }
1517 
1518 void
1519 vchiq_on_remote_use(struct vchiq_state *state)
1520 {
1521 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1522 
1523 	atomic_inc(&arm_state->ka_use_count);
1524 	complete(&arm_state->ka_evt);
1525 }
1526 
1527 void
1528 vchiq_on_remote_release(struct vchiq_state *state)
1529 {
1530 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1531 
1532 	atomic_inc(&arm_state->ka_release_count);
1533 	complete(&arm_state->ka_evt);
1534 }
1535 
1536 int
1537 vchiq_use_service_internal(struct vchiq_service *service)
1538 {
1539 	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
1540 }
1541 
1542 int
1543 vchiq_release_service_internal(struct vchiq_service *service)
1544 {
1545 	return vchiq_release_internal(service->state, service);
1546 }
1547 
/* Accessor: the instance's embedded debugfs node. */
struct vchiq_debugfs_node *
vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
{
	return &instance->debugfs_node;
}
1553 
1554 int
1555 vchiq_instance_get_use_count(struct vchiq_instance *instance)
1556 {
1557 	struct vchiq_service *service;
1558 	int use_count = 0, i;
1559 
1560 	i = 0;
1561 	rcu_read_lock();
1562 	while ((service = __next_service_by_instance(instance->state,
1563 						     instance, &i)))
1564 		use_count += service->service_use_count;
1565 	rcu_read_unlock();
1566 	return use_count;
1567 }
1568 
/* Accessor: pid of the process that owns this instance. */
int
vchiq_instance_get_pid(struct vchiq_instance *instance)
{
	return instance->pid;
}
1574 
/* Accessor: the instance's trace flag. */
int
vchiq_instance_get_trace(struct vchiq_instance *instance)
{
	return instance->trace;
}
1580 
1581 void
1582 vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
1583 {
1584 	struct vchiq_service *service;
1585 	int i;
1586 
1587 	i = 0;
1588 	rcu_read_lock();
1589 	while ((service = __next_service_by_instance(instance->state,
1590 						     instance, &i)))
1591 		service->trace = trace;
1592 	rcu_read_unlock();
1593 	instance->trace = (trace != 0);
1594 }
1595 
1596 enum vchiq_status
1597 vchiq_use_service(unsigned int handle)
1598 {
1599 	enum vchiq_status ret = VCHIQ_ERROR;
1600 	struct vchiq_service *service = find_service_by_handle(handle);
1601 
1602 	if (service) {
1603 		ret = vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
1604 		vchiq_service_put(service);
1605 	}
1606 	return ret;
1607 }
1608 EXPORT_SYMBOL(vchiq_use_service);
1609 
1610 enum vchiq_status
1611 vchiq_release_service(unsigned int handle)
1612 {
1613 	enum vchiq_status ret = VCHIQ_ERROR;
1614 	struct vchiq_service *service = find_service_by_handle(handle);
1615 
1616 	if (service) {
1617 		ret = vchiq_release_internal(service->state, service);
1618 		vchiq_service_put(service);
1619 	}
1620 	return ret;
1621 }
1622 EXPORT_SYMBOL(vchiq_release_service);
1623 
/* Snapshot of one service's identity and use count, gathered under the
 * locks in vchiq_dump_service_use_state() and printed afterwards.
 */
struct service_data_struct {
	int fourcc;	/* service fourcc identifier */
	int clientid;	/* service client id */
	int use_count;	/* service_use_count at snapshot time */
};
1629 
/*
 * Log each service's use count plus the overall peer and videocore use
 * counts. Data is snapshotted into a temporary array under the locks,
 * then logged after the locks are dropped.
 */
void
vchiq_dump_service_use_state(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct service_data_struct *service_data;
	int i, found = 0;
	/*
	 * If there's more than 64 services, only dump ones with
	 * non-zero counts
	 */
	int only_nonzero = 0;
	static const char *nz = "<-- preventing suspend";

	int peer_count;
	int vc_use_count;
	int active_services;

	if (!arm_state)
		return;

	/* Best effort: silently give up if the snapshot array can't be allocated. */
	service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
				     GFP_KERNEL);
	if (!service_data)
		return;

	read_lock_bh(&arm_state->susp_res_lock);
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > MAX_SERVICES)
		only_nonzero = 1;

	/* Snapshot per-service counts under RCU (nested inside susp_res_lock). */
	rcu_read_lock();
	for (i = 0; i < active_services; i++) {
		struct vchiq_service *service_ptr =
			rcu_dereference(state->services[i]);

		if (!service_ptr)
			continue;

		if (only_nonzero && !service_ptr->service_use_count)
			continue;

		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
			continue;

		service_data[found].fourcc = service_ptr->base.fourcc;
		service_data[found].clientid = service_ptr->client_id;
		service_data[found].use_count = service_ptr->service_use_count;
		found++;
		if (found >= MAX_SERVICES)
			break;
	}
	rcu_read_unlock();

	read_unlock_bh(&arm_state->susp_res_lock);

	/* Locks dropped - now safe to log from the snapshot. */
	if (only_nonzero)
		vchiq_log_warning(vchiq_susp_log_level, "Too many active services (%d). Only dumping up to first %d services with non-zero use-count",
				  active_services, found);

	for (i = 0; i < found; i++) {
		vchiq_log_warning(vchiq_susp_log_level, "----- %c%c%c%c:%d service count %d %s",
				  VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
				  service_data[i].clientid, service_data[i].use_count,
				  service_data[i].use_count ? nz : "");
	}
	vchiq_log_warning(vchiq_susp_log_level, "----- VCHIQ use count %d", peer_count);
	vchiq_log_warning(vchiq_susp_log_level, "--- Overall vchiq instance use count %d",
			  vc_use_count);

	kfree(service_data);
}
1703 
1704 enum vchiq_status
1705 vchiq_check_service(struct vchiq_service *service)
1706 {
1707 	struct vchiq_arm_state *arm_state;
1708 	enum vchiq_status ret = VCHIQ_ERROR;
1709 
1710 	if (!service || !service->state)
1711 		goto out;
1712 
1713 	arm_state = vchiq_platform_get_arm_state(service->state);
1714 
1715 	read_lock_bh(&arm_state->susp_res_lock);
1716 	if (service->service_use_count)
1717 		ret = VCHIQ_SUCCESS;
1718 	read_unlock_bh(&arm_state->susp_res_lock);
1719 
1720 	if (ret == VCHIQ_ERROR) {
1721 		vchiq_log_error(vchiq_susp_log_level,
1722 				"%s ERROR - %c%c%c%c:%d service count %d, state count %d", __func__,
1723 				VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc), service->client_id,
1724 				service->service_use_count, arm_state->videocore_use_count);
1725 		vchiq_dump_service_use_state(service->state);
1726 	}
1727 out:
1728 	return ret;
1729 }
1730 
/*
 * Connection-state hook. On the first transition into CONNECTED, start
 * the keep-alive thread (exactly once, guarded by first_connect under
 * susp_res_lock); later transitions are only logged.
 */
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
				       enum vchiq_connstate oldstate,
				       enum vchiq_connstate newstate)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	char threadname[16];

	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
		       get_conn_state_name(oldstate), get_conn_state_name(newstate));
	if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
		return;

	/* Only act on the very first connect. */
	write_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->first_connect) {
		write_unlock_bh(&arm_state->susp_res_lock);
		return;
	}

	arm_state->first_connect = 1;
	write_unlock_bh(&arm_state->susp_res_lock);
	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
		 state->id);
	arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
					      (void *)state,
					      threadname);
	if (IS_ERR(arm_state->ka_thread)) {
		/* Not fatal to the driver, but keep-alive will not run. */
		vchiq_log_error(vchiq_susp_log_level,
				"vchiq: FATAL: couldn't create thread %s",
				threadname);
	} else {
		wake_up_process(arm_state->ka_thread);
	}
}
1764 
/* Device-tree match table; .data supplies the per-SoC vchiq_drvdata
 * consumed in vchiq_probe().
 */
static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_drvdata },
	{ .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_drvdata },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);
1771 
1772 static struct platform_device *
1773 vchiq_register_child(struct platform_device *pdev, const char *name)
1774 {
1775 	struct platform_device_info pdevinfo;
1776 	struct platform_device *child;
1777 
1778 	memset(&pdevinfo, 0, sizeof(pdevinfo));
1779 
1780 	pdevinfo.parent = &pdev->dev;
1781 	pdevinfo.name = name;
1782 	pdevinfo.id = PLATFORM_DEVID_NONE;
1783 	pdevinfo.dma_mask = DMA_BIT_MASK(32);
1784 
1785 	child = platform_device_register_full(&pdevinfo);
1786 	if (IS_ERR(child)) {
1787 		dev_warn(&pdev->dev, "%s not registered\n", name);
1788 		child = NULL;
1789 	}
1790 
1791 	return child;
1792 }
1793 
1794 static int vchiq_probe(struct platform_device *pdev)
1795 {
1796 	struct device_node *fw_node;
1797 	const struct of_device_id *of_id;
1798 	struct vchiq_drvdata *drvdata;
1799 	int err;
1800 
1801 	of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
1802 	drvdata = (struct vchiq_drvdata *)of_id->data;
1803 	if (!drvdata)
1804 		return -EINVAL;
1805 
1806 	fw_node = of_find_compatible_node(NULL, NULL,
1807 					  "raspberrypi,bcm2835-firmware");
1808 	if (!fw_node) {
1809 		dev_err(&pdev->dev, "Missing firmware node\n");
1810 		return -ENOENT;
1811 	}
1812 
1813 	drvdata->fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
1814 	of_node_put(fw_node);
1815 	if (!drvdata->fw)
1816 		return -EPROBE_DEFER;
1817 
1818 	platform_set_drvdata(pdev, drvdata);
1819 
1820 	err = vchiq_platform_init(pdev, &g_state);
1821 	if (err)
1822 		goto failed_platform_init;
1823 
1824 	vchiq_debugfs_init();
1825 
1826 	vchiq_log_info(vchiq_arm_log_level,
1827 		       "vchiq: platform initialised - version %d (min %d)",
1828 		       VCHIQ_VERSION, VCHIQ_VERSION_MIN);
1829 
1830 	/*
1831 	 * Simply exit on error since the function handles cleanup in
1832 	 * cases of failure.
1833 	 */
1834 	err = vchiq_register_chrdev(&pdev->dev);
1835 	if (err) {
1836 		vchiq_log_warning(vchiq_arm_log_level,
1837 				  "Failed to initialize vchiq cdev");
1838 		goto error_exit;
1839 	}
1840 
1841 	bcm2835_camera = vchiq_register_child(pdev, "bcm2835-camera");
1842 	bcm2835_audio = vchiq_register_child(pdev, "bcm2835_audio");
1843 
1844 	return 0;
1845 
1846 failed_platform_init:
1847 	vchiq_log_warning(vchiq_arm_log_level, "could not initialize vchiq platform");
1848 error_exit:
1849 	return err;
1850 }
1851 
/*
 * Platform remove: tear down in reverse order of probe - children
 * first, then debugfs, then the character device.
 */
static int vchiq_remove(struct platform_device *pdev)
{
	/* platform_device_unregister() tolerates the NULL from a failed probe. */
	platform_device_unregister(bcm2835_audio);
	platform_device_unregister(bcm2835_camera);
	vchiq_debugfs_deinit();
	vchiq_deregister_chrdev();

	return 0;
}
1861 
/* Platform driver glue, bound via the OF match table above. */
static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove = vchiq_remove,
};
1870 
1871 static int __init vchiq_driver_init(void)
1872 {
1873 	int ret;
1874 
1875 	ret = platform_driver_register(&vchiq_driver);
1876 	if (ret)
1877 		pr_err("Failed to register vchiq driver\n");
1878 
1879 	return ret;
1880 }
1881 module_init(vchiq_driver_init);
1882 
/* Module exit: unregister the platform driver. */
static void __exit vchiq_driver_exit(void)
{
	platform_driver_unregister(&vchiq_driver);
}
module_exit(vchiq_driver_exit);
1888 
/* Module metadata. */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");
1892