1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
4  * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5  */
6 
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/sched/signal.h>
10 #include <linux/types.h>
11 #include <linux/errno.h>
12 #include <linux/cdev.h>
13 #include <linux/fs.h>
14 #include <linux/device.h>
15 #include <linux/mm.h>
16 #include <linux/highmem.h>
17 #include <linux/pagemap.h>
18 #include <linux/bug.h>
19 #include <linux/completion.h>
20 #include <linux/list.h>
21 #include <linux/of.h>
22 #include <linux/platform_device.h>
23 #include <linux/compat.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/rcupdate.h>
26 #include <linux/delay.h>
27 #include <linux/slab.h>
28 #include <linux/interrupt.h>
29 #include <linux/io.h>
30 #include <linux/uaccess.h>
31 #include <soc/bcm2835/raspberrypi-firmware.h>
32 
33 #include "vchiq_core.h"
34 #include "vchiq_ioctl.h"
35 #include "vchiq_arm.h"
36 #include "vchiq_debugfs.h"
37 #include "vchiq_connected.h"
38 #include "vchiq_pagelist.h"
39 
40 #define DEVICE_NAME "vchiq"
41 
42 #define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
43 
44 #define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
45 
46 #define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
47 #define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX  1
48 
49 #define BELL0	0x00
50 #define BELL2	0x08
51 
52 #define ARM_DS_ACTIVE	BIT(2)
53 
54 /* Override the default prefix, which would be vchiq_arm (from the filename) */
55 #undef MODULE_PARAM_PREFIX
56 #define MODULE_PARAM_PREFIX DEVICE_NAME "."
57 
58 #define KEEPALIVE_VER 1
59 #define KEEPALIVE_VER_MIN KEEPALIVE_VER
60 
61 /* Run time control of log level, based on KERN_XXX level. */
/* Run time control of log level, based on KERN_XXX level. */
int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_susp_log_level = VCHIQ_LOG_ERROR;

/* Protects the per-service message queues consumed via the char device. */
DEFINE_SPINLOCK(msg_queue_spinlock);
/* The single global VCHIQ connection state for this driver instance. */
struct vchiq_state g_state;

/* Child platform devices registered on top of the VCHIQ link. */
static struct platform_device *bcm2835_camera;
static struct platform_device *bcm2835_audio;
70 
/* Per-SoC configuration, selected via the OF match table at probe time. */
struct vchiq_drvdata {
	const unsigned int cache_line_size; /* VPU L2 cache line size, bytes */
	struct rpi_firmware *fw;            /* firmware mailbox handle */
};

/* BCM2835 (original Pi): 32-byte VPU L2 cache lines. */
static struct vchiq_drvdata bcm2835_drvdata = {
	.cache_line_size = 32,
};

/* BCM2836/BCM2837 (Pi 2/3): 64-byte VPU L2 cache lines. */
static struct vchiq_drvdata bcm2836_drvdata = {
	.cache_line_size = 64,
};
83 
/*
 * ARM-side platform state, hung off vchiq_state->platform_state
 * (see vchiq_platform_init_state()).
 */
struct vchiq_arm_state {
	/* Keepalive-related data */
	struct task_struct *ka_thread;
	struct completion ka_evt;
	atomic_t ka_use_count;
	atomic_t ka_use_ack_count;
	atomic_t ka_release_count;

	/* Protects the use counts and first_connect flag below. */
	rwlock_t susp_res_lock;

	/* Back-pointer to the owning connection state. */
	struct vchiq_state *state;

	/*
	 * Global use count for videocore.
	 * This is equal to the sum of the use counts for all services.  When
	 * this hits zero the videocore suspend procedure will be initiated.
	 */
	int videocore_use_count;

	/*
	 * Use count to track requests from videocore peer.
	 * This use count is not associated with a service, so needs to be
	 * tracked separately with the state.
	 */
	int peer_use_count;

	/*
	 * Flag to indicate that the first vchiq connect has made it through.
	 * This means that both sides should be fully ready, and we should
	 * be able to suspend after this point.
	 */
	int first_connect;
};
117 
/*
 * Book-keeping for one DMA-mapped bulk-transfer pagelist.  The pagelist,
 * page array, scatterlist and this struct itself all live in a single
 * coherent DMA allocation (see create_pagelist()), so one
 * dma_free_coherent() in cleanup_pagelistinfo() tears everything down.
 */
struct vchiq_pagelist_info {
	struct pagelist *pagelist;        /* shared with the VPU firmware */
	size_t pagelist_buffer_size;      /* size of the whole coherent buffer */
	dma_addr_t dma_addr;              /* bus address handed to the VPU */
	enum dma_data_direction dma_dir;  /* DMA_TO_DEVICE or DMA_FROM_DEVICE */
	unsigned int num_pages;
	unsigned int pages_need_release;  /* pages came from pin_user_pages_fast() */
	struct page **pages;
	struct scatterlist *scatterlist;
	unsigned int scatterlist_mapped;  /* set while the sg list is DMA-mapped */
};
129 
static void __iomem *g_regs;	/* doorbell register block (BELL0/BELL2) */
/* This value is the size of the L2 cache lines as understood by the
 * VPU firmware, which determines the required alignment of the
 * offsets/sizes in pagelists.
 *
 * Modern VPU firmware looks for a DT "cache-line-size" property in
 * the VCHIQ node and will overwrite it with the actual L2 cache size,
 * which the kernel must then respect.  That property was rejected
 * upstream, so we have to use the VPU firmware's compatibility value
 * of 32.
 */
static unsigned int g_cache_line_size = 32;
/* Each fragment buffer holds one head and one tail partial cache line. */
static unsigned int g_fragments_size;
static char *g_fragments_base;	/* base of the fragment buffer pool */
static char *g_free_fragments;	/* singly-linked free list of fragments */
static struct semaphore g_free_fragments_sema;	/* counts free fragments */

/* Serialises manipulation of the g_free_fragments list. */
static DEFINE_SEMAPHORE(g_free_fragments_mutex, 1);
148 
149 static int
150 vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data,
151 			     unsigned int size, enum vchiq_bulk_dir dir);
152 
153 static irqreturn_t
vchiq_doorbell_irq(int irq,void * dev_id)154 vchiq_doorbell_irq(int irq, void *dev_id)
155 {
156 	struct vchiq_state *state = dev_id;
157 	irqreturn_t ret = IRQ_NONE;
158 	unsigned int status;
159 
160 	/* Read (and clear) the doorbell */
161 	status = readl(g_regs + BELL0);
162 
163 	if (status & ARM_DS_ACTIVE) {  /* Was the doorbell rung? */
164 		remote_event_pollall(state);
165 		ret = IRQ_HANDLED;
166 	}
167 
168 	return ret;
169 }
170 
171 static void
cleanup_pagelistinfo(struct vchiq_instance * instance,struct vchiq_pagelist_info * pagelistinfo)172 cleanup_pagelistinfo(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo)
173 {
174 	if (pagelistinfo->scatterlist_mapped) {
175 		dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
176 			     pagelistinfo->num_pages, pagelistinfo->dma_dir);
177 	}
178 
179 	if (pagelistinfo->pages_need_release)
180 		unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);
181 
182 	dma_free_coherent(instance->state->dev, pagelistinfo->pagelist_buffer_size,
183 			  pagelistinfo->pagelist, pagelistinfo->dma_addr);
184 }
185 
186 static inline bool
is_adjacent_block(u32 * addrs,u32 addr,unsigned int k)187 is_adjacent_block(u32 *addrs, u32 addr, unsigned int k)
188 {
189 	u32 tmp;
190 
191 	if (!k)
192 		return false;
193 
194 	tmp = (addrs[k - 1] & PAGE_MASK) +
195 	      (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT);
196 
197 	return tmp == (addr & PAGE_MASK);
198 }
199 
200 /* There is a potential problem with partial cache lines (pages?)
201  * at the ends of the block when reading. If the CPU accessed anything in
202  * the same line (page?) then it may have pulled old data into the cache,
203  * obscuring the new data underneath. We can solve this by transferring the
204  * partial cache lines separately, and allowing the ARM to copy into the
205  * cached area.
206  */
207 
/*
 * Build and DMA-map a pagelist describing either a kernel/vmalloc buffer
 * (@buf) or a userspace buffer (@ubuf) for a bulk transfer of @count
 * bytes.  Exactly one of @buf/@ubuf is non-NULL.  The pagelist, the page
 * pointer array, the scatterlist and the vchiq_pagelist_info bookkeeping
 * all live in one coherent DMA allocation so cleanup_pagelistinfo() can
 * tear everything down with a single dma_free_coherent().
 * Returns the bookkeeping struct, or NULL on any failure.
 */
static struct vchiq_pagelist_info *
create_pagelist(struct vchiq_instance *instance, char *buf, char __user *ubuf,
		size_t count, unsigned short type)
{
	struct pagelist *pagelist;
	struct vchiq_pagelist_info *pagelistinfo;
	struct page **pages;
	u32 *addrs;
	unsigned int num_pages, offset, i, k;
	int actual_pages;
	size_t pagelist_size;
	struct scatterlist *scatterlist, *sg;
	int dma_buffers;
	dma_addr_t dma_addr;

	if (count >= INT_MAX - PAGE_SIZE)
		return NULL;

	/* Offset of the data within its first page. */
	if (buf)
		offset = (uintptr_t)buf & (PAGE_SIZE - 1);
	else
		offset = (uintptr_t)ubuf & (PAGE_SIZE - 1);
	num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);

	/* Reject sizes whose per-page bookkeeping would overflow size_t. */
	if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) -
			 sizeof(struct vchiq_pagelist_info)) /
			(sizeof(u32) + sizeof(pages[0]) +
			 sizeof(struct scatterlist)))
		return NULL;

	pagelist_size = sizeof(struct pagelist) +
			(num_pages * sizeof(u32)) +
			(num_pages * sizeof(pages[0]) +
			(num_pages * sizeof(struct scatterlist))) +
			sizeof(struct vchiq_pagelist_info);

	/* Allocate enough storage to hold the page pointers and the page
	 * list
	 */
	pagelist = dma_alloc_coherent(instance->state->dev, pagelist_size, &dma_addr,
				      GFP_KERNEL);

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);

	if (!pagelist)
		return NULL;

	/* Carve the single allocation into its four regions. */
	addrs		= pagelist->addrs;
	pages		= (struct page **)(addrs + num_pages);
	scatterlist	= (struct scatterlist *)(pages + num_pages);
	pagelistinfo	= (struct vchiq_pagelist_info *)
			  (scatterlist + num_pages);

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Populate the fields of the pagelistinfo structure */
	pagelistinfo->pagelist = pagelist;
	pagelistinfo->pagelist_buffer_size = pagelist_size;
	pagelistinfo->dma_addr = dma_addr;
	pagelistinfo->dma_dir =  (type == PAGELIST_WRITE) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE;
	pagelistinfo->num_pages = num_pages;
	pagelistinfo->pages_need_release = 0;
	pagelistinfo->pages = pages;
	pagelistinfo->scatterlist = scatterlist;
	pagelistinfo->scatterlist_mapped = 0;

	if (buf) {
		/* Kernel buffer: resolve each vmalloc page to a struct page. */
		unsigned long length = count;
		unsigned int off = offset;

		for (actual_pages = 0; actual_pages < num_pages;
		     actual_pages++) {
			struct page *pg =
				vmalloc_to_page((buf +
						 (actual_pages * PAGE_SIZE)));
			size_t bytes = PAGE_SIZE - off;

			if (!pg) {
				cleanup_pagelistinfo(instance, pagelistinfo);
				return NULL;
			}

			if (bytes > length)
				bytes = length;
			pages[actual_pages] = pg;
			length -= bytes;
			off = 0;
		}
		/* do not try and release vmalloc pages */
	} else {
		/* User buffer: pin the pages so they cannot move under DMA. */
		actual_pages = pin_user_pages_fast((unsigned long)ubuf & PAGE_MASK, num_pages,
						   type == PAGELIST_READ, pages);

		if (actual_pages != num_pages) {
			vchiq_log_info(vchiq_arm_log_level,
				       "%s - only %d/%d pages locked",
				       __func__, actual_pages, num_pages);

			/* This is probably due to the process being killed */
			if (actual_pages > 0)
				unpin_user_pages(pages, actual_pages);
			cleanup_pagelistinfo(instance, pagelistinfo);
			return NULL;
		}
		 /* release user pages */
		pagelistinfo->pages_need_release = 1;
	}

	/*
	 * Initialize the scatterlist so that the magic cookie
	 *  is filled if debugging is enabled
	 */
	sg_init_table(scatterlist, num_pages);
	/* Now set the pages for each scatterlist */
	for (i = 0; i < num_pages; i++)	{
		unsigned int len = PAGE_SIZE - offset;

		if (len > count)
			len = count;
		sg_set_page(scatterlist + i, pages[i], len, offset);
		/* only the first page carries a non-zero offset */
		offset = 0;
		count -= len;
	}

	dma_buffers = dma_map_sg(instance->state->dev,
				 scatterlist,
				 num_pages,
				 pagelistinfo->dma_dir);

	if (dma_buffers == 0) {
		cleanup_pagelistinfo(instance, pagelistinfo);
		return NULL;
	}

	pagelistinfo->scatterlist_mapped = 1;

	/* Combine adjacent blocks for performance */
	k = 0;
	for_each_sg(scatterlist, sg, dma_buffers, i) {
		u32 len = sg_dma_len(sg);
		u32 addr = sg_dma_address(sg);

		/* Note: addrs is the address + page_count - 1
		 * The firmware expects blocks after the first to be page-
		 * aligned and a multiple of the page size
		 */
		WARN_ON(len == 0);
		WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
		WARN_ON(i && (addr & ~PAGE_MASK));
		if (is_adjacent_block(addrs, addr, k))
			addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
		else
			addrs[k++] = (addr & PAGE_MASK) |
				(((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
	}

	/* Partial cache lines (fragments) require special measures */
	if ((type == PAGELIST_READ) &&
	    ((pagelist->offset & (g_cache_line_size - 1)) ||
	    ((pagelist->offset + pagelist->length) &
	    (g_cache_line_size - 1)))) {
		char *fragments;

		/* Claim a fragment buffer; may sleep until one is free. */
		if (down_interruptible(&g_free_fragments_sema)) {
			cleanup_pagelistinfo(instance, pagelistinfo);
			return NULL;
		}

		WARN_ON(!g_free_fragments);

		down(&g_free_fragments_mutex);
		fragments = g_free_fragments;
		WARN_ON(!fragments);
		g_free_fragments = *(char **)g_free_fragments;
		up(&g_free_fragments_mutex);
		/* Encode the fragment index in the pagelist type field. */
		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
			(fragments - g_fragments_base) / g_fragments_size;
	}

	return pagelistinfo;
}
392 
/*
 * Complete and release a pagelist created by create_pagelist().
 * @actual is the number of bytes actually transferred (negative on
 * failure); for reads with partial cache lines, it governs how much of
 * the head/tail fragment data is copied back into the pinned pages.
 */
static void
free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo,
	      int actual)
{
	struct pagelist *pagelist = pagelistinfo->pagelist;
	struct page **pages = pagelistinfo->pages;
	unsigned int num_pages = pagelistinfo->num_pages;

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK, %d",
			__func__, pagelistinfo->pagelist, actual);

	/*
	 * NOTE: dma_unmap_sg must be called before the
	 * cpu can touch any of the data/pages.
	 */
	dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
		     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	pagelistinfo->scatterlist_mapped = 0;

	/* Deal with any partial cache lines (fragments) */
	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS && g_fragments_base) {
		/* The fragment index was encoded into the type field. */
		char *fragments = g_fragments_base +
			(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
			g_fragments_size;
		int head_bytes, tail_bytes;

		/* Bytes before/after the cache-line-aligned middle section. */
		head_bytes = (g_cache_line_size - pagelist->offset) &
			(g_cache_line_size - 1);
		tail_bytes = (pagelist->offset + actual) &
			(g_cache_line_size - 1);

		if ((actual >= 0) && (head_bytes != 0)) {
			if (head_bytes > actual)
				head_bytes = actual;

			/* Copy the leading partial cache line into place. */
			memcpy_to_page(pages[0],
				pagelist->offset,
				fragments,
				head_bytes);
		}
		/* Copy the trailing partial cache line into place. */
		if ((actual >= 0) && (head_bytes < actual) &&
		    (tail_bytes != 0))
			memcpy_to_page(pages[num_pages - 1],
				(pagelist->offset + actual) &
				(PAGE_SIZE - 1) & ~(g_cache_line_size - 1),
				fragments + g_cache_line_size,
				tail_bytes);

		/* Return the fragment buffer to the free list. */
		down(&g_free_fragments_mutex);
		*(char **)fragments = g_free_fragments;
		g_free_fragments = fragments;
		up(&g_free_fragments_mutex);
		up(&g_free_fragments_sema);
	}

	/* Need to mark all the pages dirty. */
	if (pagelist->type != PAGELIST_WRITE &&
	    pagelistinfo->pages_need_release) {
		unsigned int i;

		for (i = 0; i < num_pages; i++)
			set_page_dirty(pages[i]);
	}

	cleanup_pagelistinfo(instance, pagelistinfo);
}
459 
vchiq_platform_init(struct platform_device * pdev,struct vchiq_state * state)460 static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
461 {
462 	struct device *dev = &pdev->dev;
463 	struct vchiq_drvdata *drvdata = platform_get_drvdata(pdev);
464 	struct rpi_firmware *fw = drvdata->fw;
465 	struct vchiq_slot_zero *vchiq_slot_zero;
466 	void *slot_mem;
467 	dma_addr_t slot_phys;
468 	u32 channelbase;
469 	int slot_mem_size, frag_mem_size;
470 	int err, irq, i;
471 
472 	/*
473 	 * VCHI messages between the CPU and firmware use
474 	 * 32-bit bus addresses.
475 	 */
476 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
477 
478 	if (err < 0)
479 		return err;
480 
481 	g_cache_line_size = drvdata->cache_line_size;
482 	g_fragments_size = 2 * g_cache_line_size;
483 
484 	/* Allocate space for the channels in coherent memory */
485 	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
486 	frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);
487 
488 	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
489 				       &slot_phys, GFP_KERNEL);
490 	if (!slot_mem) {
491 		dev_err(dev, "could not allocate DMA memory\n");
492 		return -ENOMEM;
493 	}
494 
495 	WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);
496 
497 	vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
498 	if (!vchiq_slot_zero)
499 		return -ENOMEM;
500 
501 	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
502 		(int)slot_phys + slot_mem_size;
503 	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
504 		MAX_FRAGMENTS;
505 
506 	g_fragments_base = (char *)slot_mem + slot_mem_size;
507 
508 	g_free_fragments = g_fragments_base;
509 	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
510 		*(char **)&g_fragments_base[i * g_fragments_size] =
511 			&g_fragments_base[(i + 1) * g_fragments_size];
512 	}
513 	*(char **)&g_fragments_base[i * g_fragments_size] = NULL;
514 	sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
515 
516 	err = vchiq_init_state(state, vchiq_slot_zero, dev);
517 	if (err)
518 		return err;
519 
520 	g_regs = devm_platform_ioremap_resource(pdev, 0);
521 	if (IS_ERR(g_regs))
522 		return PTR_ERR(g_regs);
523 
524 	irq = platform_get_irq(pdev, 0);
525 	if (irq <= 0)
526 		return irq;
527 
528 	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
529 			       "VCHIQ doorbell", state);
530 	if (err) {
531 		dev_err(dev, "failed to register irq=%d\n", irq);
532 		return err;
533 	}
534 
535 	/* Send the base address of the slots to VideoCore */
536 	channelbase = slot_phys;
537 	err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
538 				    &channelbase, sizeof(channelbase));
539 	if (err) {
540 		dev_err(dev, "failed to send firmware property: %d\n", err);
541 		return err;
542 	}
543 
544 	if (channelbase) {
545 		dev_err(dev, "failed to set channelbase (response: %x)\n",
546 			channelbase);
547 		return -ENXIO;
548 	}
549 
550 	vchiq_log_info(vchiq_arm_log_level, "vchiq_init - done (slots %pK, phys %pad)",
551 		       vchiq_slot_zero, &slot_phys);
552 
553 	vchiq_call_connected_callbacks();
554 
555 	return 0;
556 }
557 
558 static void
vchiq_arm_init_state(struct vchiq_state * state,struct vchiq_arm_state * arm_state)559 vchiq_arm_init_state(struct vchiq_state *state,
560 		     struct vchiq_arm_state *arm_state)
561 {
562 	if (arm_state) {
563 		rwlock_init(&arm_state->susp_res_lock);
564 
565 		init_completion(&arm_state->ka_evt);
566 		atomic_set(&arm_state->ka_use_count, 0);
567 		atomic_set(&arm_state->ka_use_ack_count, 0);
568 		atomic_set(&arm_state->ka_release_count, 0);
569 
570 		arm_state->state = state;
571 		arm_state->first_connect = 0;
572 	}
573 }
574 
575 int
vchiq_platform_init_state(struct vchiq_state * state)576 vchiq_platform_init_state(struct vchiq_state *state)
577 {
578 	struct vchiq_arm_state *platform_state;
579 
580 	platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL);
581 	if (!platform_state)
582 		return -ENOMEM;
583 
584 	vchiq_arm_init_state(state, platform_state);
585 	state->platform_state = (struct opaque_platform_state *)platform_state;
586 
587 	return 0;
588 }
589 
vchiq_platform_get_arm_state(struct vchiq_state * state)590 static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
591 {
592 	return (struct vchiq_arm_state *)state->platform_state;
593 }
594 
/*
 * Signal an event to the VideoCore.  The barrier sequence below is
 * ordering-critical: shared-memory writes must be visible before
 * event->fired, and event->fired before the doorbell register write.
 */
void
remote_event_signal(struct remote_event *event)
{
	/*
	 * Ensure that all writes to shared data structures have completed
	 * before signalling the peer.
	 */
	wmb();

	event->fired = 1;

	dsb(sy);         /* data barrier operation */

	/* Only ring the doorbell if the peer is waiting on this event. */
	if (event->armed)
		writel(0, g_regs + BELL2); /* trigger vc interrupt */
}
611 
612 int
vchiq_prepare_bulk_data(struct vchiq_instance * instance,struct vchiq_bulk * bulk,void * offset,void __user * uoffset,int size,int dir)613 vchiq_prepare_bulk_data(struct vchiq_instance *instance, struct vchiq_bulk *bulk, void *offset,
614 			void __user *uoffset, int size, int dir)
615 {
616 	struct vchiq_pagelist_info *pagelistinfo;
617 
618 	pagelistinfo = create_pagelist(instance, offset, uoffset, size,
619 				       (dir == VCHIQ_BULK_RECEIVE)
620 				       ? PAGELIST_READ
621 				       : PAGELIST_WRITE);
622 
623 	if (!pagelistinfo)
624 		return -ENOMEM;
625 
626 	bulk->data = pagelistinfo->dma_addr;
627 
628 	/*
629 	 * Store the pagelistinfo address in remote_data,
630 	 * which isn't used by the slave.
631 	 */
632 	bulk->remote_data = pagelistinfo;
633 
634 	return 0;
635 }
636 
637 void
vchiq_complete_bulk(struct vchiq_instance * instance,struct vchiq_bulk * bulk)638 vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk)
639 {
640 	if (bulk && bulk->remote_data && bulk->actual)
641 		free_pagelist(instance, (struct vchiq_pagelist_info *)bulk->remote_data,
642 			      bulk->actual);
643 }
644 
vchiq_dump_platform_state(void * dump_context)645 int vchiq_dump_platform_state(void *dump_context)
646 {
647 	char buf[80];
648 	int len;
649 
650 	len = snprintf(buf, sizeof(buf), "  Platform: 2835 (VC master)");
651 	return vchiq_dump(dump_context, buf, len + 1);
652 }
653 
654 #define VCHIQ_INIT_RETRIES 10
vchiq_initialise(struct vchiq_instance ** instance_out)655 int vchiq_initialise(struct vchiq_instance **instance_out)
656 {
657 	struct vchiq_state *state;
658 	struct vchiq_instance *instance = NULL;
659 	int i, ret;
660 
661 	/*
662 	 * VideoCore may not be ready due to boot up timing.
663 	 * It may never be ready if kernel and firmware are mismatched,so don't
664 	 * block forever.
665 	 */
666 	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
667 		state = vchiq_get_state();
668 		if (state)
669 			break;
670 		usleep_range(500, 600);
671 	}
672 	if (i == VCHIQ_INIT_RETRIES) {
673 		vchiq_log_error(vchiq_core_log_level, "%s: videocore not initialized\n", __func__);
674 		ret = -ENOTCONN;
675 		goto failed;
676 	} else if (i > 0) {
677 		vchiq_log_warning(vchiq_core_log_level,
678 				  "%s: videocore initialized after %d retries\n", __func__, i);
679 	}
680 
681 	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
682 	if (!instance) {
683 		vchiq_log_error(vchiq_core_log_level,
684 				"%s: error allocating vchiq instance\n", __func__);
685 		ret = -ENOMEM;
686 		goto failed;
687 	}
688 
689 	instance->connected = 0;
690 	instance->state = state;
691 	mutex_init(&instance->bulk_waiter_list_mutex);
692 	INIT_LIST_HEAD(&instance->bulk_waiter_list);
693 
694 	*instance_out = instance;
695 
696 	ret = 0;
697 
698 failed:
699 	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, ret);
700 
701 	return ret;
702 }
703 EXPORT_SYMBOL(vchiq_initialise);
704 
free_bulk_waiter(struct vchiq_instance * instance)705 void free_bulk_waiter(struct vchiq_instance *instance)
706 {
707 	struct bulk_waiter_node *waiter, *next;
708 
709 	list_for_each_entry_safe(waiter, next,
710 				 &instance->bulk_waiter_list, list) {
711 		list_del(&waiter->list);
712 		vchiq_log_info(vchiq_arm_log_level, "bulk_waiter - cleaned up %pK for pid %d",
713 			       waiter, waiter->pid);
714 		kfree(waiter);
715 	}
716 }
717 
vchiq_shutdown(struct vchiq_instance * instance)718 int vchiq_shutdown(struct vchiq_instance *instance)
719 {
720 	int status = 0;
721 	struct vchiq_state *state = instance->state;
722 
723 	if (mutex_lock_killable(&state->mutex))
724 		return -EAGAIN;
725 
726 	/* Remove all services */
727 	vchiq_shutdown_internal(state, instance);
728 
729 	mutex_unlock(&state->mutex);
730 
731 	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
732 
733 	free_bulk_waiter(instance);
734 	kfree(instance);
735 
736 	return status;
737 }
738 EXPORT_SYMBOL(vchiq_shutdown);
739 
vchiq_is_connected(struct vchiq_instance * instance)740 static int vchiq_is_connected(struct vchiq_instance *instance)
741 {
742 	return instance->connected;
743 }
744 
vchiq_connect(struct vchiq_instance * instance)745 int vchiq_connect(struct vchiq_instance *instance)
746 {
747 	int status;
748 	struct vchiq_state *state = instance->state;
749 
750 	if (mutex_lock_killable(&state->mutex)) {
751 		vchiq_log_trace(vchiq_core_log_level, "%s: call to mutex_lock failed", __func__);
752 		status = -EAGAIN;
753 		goto failed;
754 	}
755 	status = vchiq_connect_internal(state, instance);
756 
757 	if (!status)
758 		instance->connected = 1;
759 
760 	mutex_unlock(&state->mutex);
761 
762 failed:
763 	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
764 
765 	return status;
766 }
767 EXPORT_SYMBOL(vchiq_connect);
768 
769 static int
vchiq_add_service(struct vchiq_instance * instance,const struct vchiq_service_params_kernel * params,unsigned int * phandle)770 vchiq_add_service(struct vchiq_instance *instance,
771 		  const struct vchiq_service_params_kernel *params,
772 		  unsigned int *phandle)
773 {
774 	int status;
775 	struct vchiq_state *state = instance->state;
776 	struct vchiq_service *service = NULL;
777 	int srvstate;
778 
779 	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
780 
781 	srvstate = vchiq_is_connected(instance)
782 		? VCHIQ_SRVSTATE_LISTENING
783 		: VCHIQ_SRVSTATE_HIDDEN;
784 
785 	service = vchiq_add_service_internal(state, params, srvstate, instance, NULL);
786 
787 	if (service) {
788 		*phandle = service->handle;
789 		status = 0;
790 	} else {
791 		status = -EINVAL;
792 	}
793 
794 	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
795 
796 	return status;
797 }
798 
799 int
vchiq_open_service(struct vchiq_instance * instance,const struct vchiq_service_params_kernel * params,unsigned int * phandle)800 vchiq_open_service(struct vchiq_instance *instance,
801 		   const struct vchiq_service_params_kernel *params,
802 		   unsigned int *phandle)
803 {
804 	int status = -EINVAL;
805 	struct vchiq_state   *state = instance->state;
806 	struct vchiq_service *service = NULL;
807 
808 	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
809 
810 	if (!vchiq_is_connected(instance))
811 		goto failed;
812 
813 	service = vchiq_add_service_internal(state, params, VCHIQ_SRVSTATE_OPENING, instance, NULL);
814 
815 	if (service) {
816 		*phandle = service->handle;
817 		status = vchiq_open_service_internal(service, current->pid);
818 		if (status) {
819 			vchiq_remove_service(instance, service->handle);
820 			*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
821 		}
822 	}
823 
824 failed:
825 	vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
826 
827 	return status;
828 }
829 EXPORT_SYMBOL(vchiq_open_service);
830 
831 int
vchiq_bulk_transmit(struct vchiq_instance * instance,unsigned int handle,const void * data,unsigned int size,void * userdata,enum vchiq_bulk_mode mode)832 vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const void *data,
833 		    unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
834 {
835 	int status;
836 
837 	while (1) {
838 		switch (mode) {
839 		case VCHIQ_BULK_MODE_NOCALLBACK:
840 		case VCHIQ_BULK_MODE_CALLBACK:
841 			status = vchiq_bulk_transfer(instance, handle,
842 						     (void *)data, NULL,
843 						     size, userdata, mode,
844 						     VCHIQ_BULK_TRANSMIT);
845 			break;
846 		case VCHIQ_BULK_MODE_BLOCKING:
847 			status = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
848 							      VCHIQ_BULK_TRANSMIT);
849 			break;
850 		default:
851 			return -EINVAL;
852 		}
853 
854 		/*
855 		 * vchiq_*_bulk_transfer() may return -EAGAIN, so we need
856 		 * to implement a retry mechanism since this function is
857 		 * supposed to block until queued
858 		 */
859 		if (status != -EAGAIN)
860 			break;
861 
862 		msleep(1);
863 	}
864 
865 	return status;
866 }
867 EXPORT_SYMBOL(vchiq_bulk_transmit);
868 
vchiq_bulk_receive(struct vchiq_instance * instance,unsigned int handle,void * data,unsigned int size,void * userdata,enum vchiq_bulk_mode mode)869 int vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int handle,
870 		       void *data, unsigned int size, void *userdata,
871 		       enum vchiq_bulk_mode mode)
872 {
873 	int status;
874 
875 	while (1) {
876 		switch (mode) {
877 		case VCHIQ_BULK_MODE_NOCALLBACK:
878 		case VCHIQ_BULK_MODE_CALLBACK:
879 			status = vchiq_bulk_transfer(instance, handle, data, NULL,
880 						     size, userdata,
881 						     mode, VCHIQ_BULK_RECEIVE);
882 			break;
883 		case VCHIQ_BULK_MODE_BLOCKING:
884 			status = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
885 							      VCHIQ_BULK_RECEIVE);
886 			break;
887 		default:
888 			return -EINVAL;
889 		}
890 
891 		/*
892 		 * vchiq_*_bulk_transfer() may return -EAGAIN, so we need
893 		 * to implement a retry mechanism since this function is
894 		 * supposed to block until queued
895 		 */
896 		if (status != -EAGAIN)
897 			break;
898 
899 		msleep(1);
900 	}
901 
902 	return status;
903 }
904 EXPORT_SYMBOL(vchiq_bulk_receive);
905 
/*
 * Perform a blocking bulk transfer for the current thread.  A per-pid
 * bulk_waiter_node is kept on instance->bulk_waiter_list so that a
 * transfer interrupted by a non-fatal signal can be resumed (retried)
 * by the same thread without re-queueing the bulk.
 */
static int
vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data,
			     unsigned int size, enum vchiq_bulk_dir dir)
{
	struct vchiq_service *service;
	int status;
	struct bulk_waiter_node *waiter = NULL, *iter;

	service = find_service_by_handle(instance, handle);
	if (!service)
		return -EINVAL;

	/* The lookup was only needed to validate the handle. */
	vchiq_service_put(service);

	/* Look for a waiter saved earlier by this same thread (pid match). */
	mutex_lock(&instance->bulk_waiter_list_mutex);
	list_for_each_entry(iter, &instance->bulk_waiter_list, list) {
		if (iter->pid == current->pid) {
			list_del(&iter->list);
			waiter = iter;
			break;
		}
	}
	mutex_unlock(&instance->bulk_waiter_list_mutex);

	if (waiter) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* This thread has an outstanding bulk transfer. */
			/* FIXME: why compare a dma address to a pointer? */
			if ((bulk->data != (dma_addr_t)(uintptr_t)data) || (bulk->size != size)) {
				/*
				 * This is not a retry of the previous one.
				 * Cancel the signal when the transfer completes.
				 */
				spin_lock(&bulk_waiter_spinlock);
				bulk->userdata = NULL;
				spin_unlock(&bulk_waiter_spinlock);
			}
		}
	} else {
		/* First attempt by this thread: allocate a fresh waiter. */
		waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
		if (!waiter) {
			vchiq_log_error(vchiq_core_log_level, "%s - out of memory", __func__);
			return -ENOMEM;
		}
	}

	status = vchiq_bulk_transfer(instance, handle, data, NULL, size,
				     &waiter->bulk_waiter,
				     VCHIQ_BULK_MODE_BLOCKING, dir);
	if ((status != -EAGAIN) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* Cancel the signal when the transfer completes. */
			spin_lock(&bulk_waiter_spinlock);
			bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		kfree(waiter);
	} else {
		/*
		 * Interrupted by a non-fatal signal with the bulk still in
		 * flight: save the waiter so a retry by this thread can
		 * pick it up again.
		 */
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		vchiq_log_info(vchiq_arm_log_level, "saved bulk_waiter %pK for pid %d", waiter,
			       current->pid);
	}

	return status;
}
978 
/*
 * Queue a completion record for delivery to the userspace client.  The
 * completion ring holds MAX_COMPLETIONS entries; if it is full, wait
 * (interruptibly) for the client to consume some.  Returns 0 on success
 * (including shutdown-in-progress) or -EAGAIN if the wait was
 * interrupted by a signal.
 */
static int
add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
	       struct vchiq_header *header, struct user_service *user_service,
	       void *bulk_userdata)
{
	struct vchiq_completion_data_kernel *completion;
	int insert;

	DEBUG_INITIALISE(g_state.local);

	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level, "%s - completion queue full", __func__);
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (wait_for_completion_interruptible(&instance->remove_event)) {
			vchiq_log_info(vchiq_arm_log_level, "service_callback interrupted");
			return -EAGAIN;
		} else if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level, "service_callback closing");
			return 0;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	/* Indices increase monotonically; mask to find the ring slot. */
	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/*
		 * Take an extra reference, to be held until
		 * this CLOSED notification is delivered.
		 */
		vchiq_service_get(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/*
	 * A write barrier is needed here to ensure that the entire completion
	 * record is written out before the insert point.
	 */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	complete(&instance->insert_event);

	return 0;
}
1039 
/*
 * Callback invoked by the vchiq core for every event on a service owned
 * by a userspace client.  Routes messages either into the per-service
 * message queue (VCHI services) or into the instance's completion ring.
 *
 * Returns 0 on success, -EAGAIN if a wait was interrupted by a signal,
 * or -EINVAL if the instance began closing while we were blocked.
 */
int
service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
		 struct vchiq_header *header, unsigned int handle, void *bulk_userdata)
{
	/*
	 * How do we ensure the callback goes to the right client?
	 * The service_user data points to a user_service record
	 * containing the original callback and the user state structure, which
	 * contains a circular buffer for completion records.
	 */
	struct user_service *user_service;
	struct vchiq_service *service;
	bool skip_completion = false;

	DEBUG_INITIALISE(g_state.local);

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	rcu_read_lock();
	service = handle_to_service(instance, handle);
	if (WARN_ON(!service)) {
		rcu_read_unlock();
		return 0;
	}

	user_service = (struct user_service *)service->base.userdata;

	/*
	 * NOTE(review): instance was already passed to handle_to_service()
	 * above, so the !instance test here is likely redundant — confirm.
	 */
	if (!instance || instance->closing) {
		rcu_read_unlock();
		return 0;
	}

	/*
	 * As hopping around different synchronization mechanism,
	 * taking an extra reference results in simpler implementation.
	 */
	vchiq_service_get(service);
	rcu_read_unlock();

	vchiq_log_trace(vchiq_arm_log_level,
			"%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
			__func__, (unsigned long)user_service, service->localport,
			user_service->userdata, reason, (unsigned long)header,
			(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		/* Wait (interruptibly) while the per-service message queue is full. */
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level, "%s - msg queue full", __func__);
			/*
			 * If there is no MESSAGE_AVAILABLE in the completion
			 * queue, add one
			 */
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				int status;

				vchiq_log_info(vchiq_arm_log_level,
					       "Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason, NULL, user_service,
							bulk_userdata);
				if (status) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					vchiq_service_put(service);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			if (wait_for_completion_interruptible(&user_service->remove_event)) {
				vchiq_log_info(vchiq_arm_log_level, "%s interrupted", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				vchiq_service_put(service);
				return -EAGAIN;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level, "%s closing", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				vchiq_service_put(service);
				return -EINVAL;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		/* Queue the message; indices are free-running, masked into the ring. */
		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/*
		 * If there is a thread waiting in DEQUEUE_MESSAGE, or if
		 * there is a MESSAGE_AVAILABLE in the completion queue then
		 * bypass the completion queue.
		 */
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = true;
		}

		spin_unlock(&msg_queue_spinlock);
		complete(&user_service->insert_event);

		/* Message consumed by the queue; don't attach it to a completion. */
		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	vchiq_service_put(service);

	if (skip_completion)
		return 0;

	return add_completion(instance, reason, header, user_service,
		bulk_userdata);
}
1159 
vchiq_dump(void * dump_context,const char * str,int len)1160 int vchiq_dump(void *dump_context, const char *str, int len)
1161 {
1162 	struct dump_context *context = (struct dump_context *)dump_context;
1163 	int copy_bytes;
1164 
1165 	if (context->actual >= context->space)
1166 		return 0;
1167 
1168 	if (context->offset > 0) {
1169 		int skip_bytes = min_t(int, len, context->offset);
1170 
1171 		str += skip_bytes;
1172 		len -= skip_bytes;
1173 		context->offset -= skip_bytes;
1174 		if (context->offset > 0)
1175 			return 0;
1176 	}
1177 	copy_bytes = min_t(int, len, context->space - context->actual);
1178 	if (copy_bytes == 0)
1179 		return 0;
1180 	if (copy_to_user(context->buf + context->actual, str,
1181 			 copy_bytes))
1182 		return -EFAULT;
1183 	context->actual += copy_bytes;
1184 	len -= copy_bytes;
1185 
1186 	/*
1187 	 * If the terminating NUL is included in the length, then it
1188 	 * marks the end of a line and should be replaced with a
1189 	 * carriage return.
1190 	 */
1191 	if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
1192 		char cr = '\n';
1193 
1194 		if (copy_to_user(context->buf + context->actual - 1,
1195 				 &cr, 1))
1196 			return -EFAULT;
1197 	}
1198 	return 0;
1199 }
1200 
/*
 * Dump a one-line summary of every userspace vchiq instance.
 *
 * There is no instance list, so instances are discovered indirectly by
 * scanning all services whose callback is service_callback; the
 * instance->mark flag is used to dump each instance only once.
 * Returns 0 on success or a negative error from vchiq_dump().
 */
int vchiq_dump_platform_instances(void *dump_context)
{
	struct vchiq_state *state = vchiq_get_state();
	char buf[80];
	int len;
	int i;

	if (!state)
		return -ENOTCONN;

	/*
	 * There is no list of instances, so instead scan all services,
	 * marking those that have been dumped.
	 */

	/* First pass: clear the per-instance mark flags. */
	rcu_read_lock();
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;

		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback)
			continue;

		instance = service->instance;
		if (instance)
			instance->mark = 0;
	}
	rcu_read_unlock();

	/* Second pass: dump each not-yet-marked instance once. */
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;
		int err;

		rcu_read_lock();
		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback) {
			rcu_read_unlock();
			continue;
		}

		instance = service->instance;
		if (!instance || instance->mark) {
			rcu_read_unlock();
			continue;
		}
		/*
		 * NOTE(review): instance is dereferenced below after
		 * rcu_read_unlock() without taking a reference — looks
		 * racy against instance teardown; confirm lifetime rules.
		 */
		rcu_read_unlock();

		len = snprintf(buf, sizeof(buf),
			       "Instance %pK: pid %d,%s completions %d/%d",
			       instance, instance->pid,
			       instance->connected ? " connected, " :
			       "",
			       instance->completion_insert -
			       instance->completion_remove,
			       MAX_COMPLETIONS);
		/* len + 1 includes the NUL so vchiq_dump() emits a newline. */
		err = vchiq_dump(dump_context, buf, len + 1);
		if (err)
			return err;
		instance->mark = 1;
	}
	return 0;
}
1265 
vchiq_dump_platform_service_state(void * dump_context,struct vchiq_service * service)1266 int vchiq_dump_platform_service_state(void *dump_context,
1267 				      struct vchiq_service *service)
1268 {
1269 	struct user_service *user_service =
1270 			(struct user_service *)service->base.userdata;
1271 	char buf[80];
1272 	int len;
1273 
1274 	len = scnprintf(buf, sizeof(buf), "  instance %pK", service->instance);
1275 
1276 	if ((service->base.callback == service_callback) && user_service->is_vchi) {
1277 		len += scnprintf(buf + len, sizeof(buf) - len, ", %d/%d messages",
1278 				 user_service->msg_insert - user_service->msg_remove,
1279 				 MSG_QUEUE_SIZE);
1280 
1281 		if (user_service->dequeue_pending)
1282 			len += scnprintf(buf + len, sizeof(buf) - len,
1283 				" (dequeue pending)");
1284 	}
1285 
1286 	return vchiq_dump(dump_context, buf, len + 1);
1287 }
1288 
/*
 * Return the global vchiq state, or NULL if the remote (VideoCore) side
 * is absent or has not finished initialising.
 */
struct vchiq_state *
vchiq_get_state(void)
{
	if (!g_state.remote) {
		pr_err("%s: g_state.remote == NULL\n", __func__);
		return NULL;
	}

	if (g_state.remote->initialised != 1) {
		pr_notice("%s: g_state.remote->initialised != 1 (%d)\n",
			  __func__, g_state.remote->initialised);
		return NULL;
	}

	return &g_state;
}
1305 
1306 /*
1307  * Autosuspend related functionality
1308  */
1309 
1310 static int
vchiq_keepalive_vchiq_callback(struct vchiq_instance * instance,enum vchiq_reason reason,struct vchiq_header * header,unsigned int service_user,void * bulk_user)1311 vchiq_keepalive_vchiq_callback(struct vchiq_instance *instance,
1312 			       enum vchiq_reason reason,
1313 			       struct vchiq_header *header,
1314 			       unsigned int service_user, void *bulk_user)
1315 {
1316 	vchiq_log_error(vchiq_susp_log_level, "%s callback reason %d", __func__, reason);
1317 	return 0;
1318 }
1319 
1320 static int
vchiq_keepalive_thread_func(void * v)1321 vchiq_keepalive_thread_func(void *v)
1322 {
1323 	struct vchiq_state *state = (struct vchiq_state *)v;
1324 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1325 
1326 	int status;
1327 	struct vchiq_instance *instance;
1328 	unsigned int ka_handle;
1329 	int ret;
1330 
1331 	struct vchiq_service_params_kernel params = {
1332 		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
1333 		.callback    = vchiq_keepalive_vchiq_callback,
1334 		.version     = KEEPALIVE_VER,
1335 		.version_min = KEEPALIVE_VER_MIN
1336 	};
1337 
1338 	ret = vchiq_initialise(&instance);
1339 	if (ret) {
1340 		vchiq_log_error(vchiq_susp_log_level, "%s vchiq_initialise failed %d", __func__,
1341 				ret);
1342 		goto exit;
1343 	}
1344 
1345 	status = vchiq_connect(instance);
1346 	if (status) {
1347 		vchiq_log_error(vchiq_susp_log_level, "%s vchiq_connect failed %d", __func__,
1348 				status);
1349 		goto shutdown;
1350 	}
1351 
1352 	status = vchiq_add_service(instance, &params, &ka_handle);
1353 	if (status) {
1354 		vchiq_log_error(vchiq_susp_log_level, "%s vchiq_open_service failed %d", __func__,
1355 				status);
1356 		goto shutdown;
1357 	}
1358 
1359 	while (1) {
1360 		long rc = 0, uc = 0;
1361 
1362 		if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
1363 			vchiq_log_error(vchiq_susp_log_level, "%s interrupted", __func__);
1364 			flush_signals(current);
1365 			continue;
1366 		}
1367 
1368 		/*
1369 		 * read and clear counters.  Do release_count then use_count to
1370 		 * prevent getting more releases than uses
1371 		 */
1372 		rc = atomic_xchg(&arm_state->ka_release_count, 0);
1373 		uc = atomic_xchg(&arm_state->ka_use_count, 0);
1374 
1375 		/*
1376 		 * Call use/release service the requisite number of times.
1377 		 * Process use before release so use counts don't go negative
1378 		 */
1379 		while (uc--) {
1380 			atomic_inc(&arm_state->ka_use_ack_count);
1381 			status = vchiq_use_service(instance, ka_handle);
1382 			if (status) {
1383 				vchiq_log_error(vchiq_susp_log_level,
1384 						"%s vchiq_use_service error %d", __func__, status);
1385 			}
1386 		}
1387 		while (rc--) {
1388 			status = vchiq_release_service(instance, ka_handle);
1389 			if (status) {
1390 				vchiq_log_error(vchiq_susp_log_level,
1391 						"%s vchiq_release_service error %d", __func__,
1392 						status);
1393 			}
1394 		}
1395 	}
1396 
1397 shutdown:
1398 	vchiq_shutdown(instance);
1399 exit:
1400 	return 0;
1401 }
1402 
1403 int
vchiq_use_internal(struct vchiq_state * state,struct vchiq_service * service,enum USE_TYPE_E use_type)1404 vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
1405 		   enum USE_TYPE_E use_type)
1406 {
1407 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1408 	int ret = 0;
1409 	char entity[16];
1410 	int *entity_uc;
1411 	int local_uc;
1412 
1413 	if (!arm_state) {
1414 		ret = -EINVAL;
1415 		goto out;
1416 	}
1417 
1418 	if (use_type == USE_TYPE_VCHIQ) {
1419 		sprintf(entity, "VCHIQ:   ");
1420 		entity_uc = &arm_state->peer_use_count;
1421 	} else if (service) {
1422 		sprintf(entity, "%c%c%c%c:%03d",
1423 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1424 			service->client_id);
1425 		entity_uc = &service->service_use_count;
1426 	} else {
1427 		vchiq_log_error(vchiq_susp_log_level, "%s null service ptr", __func__);
1428 		ret = -EINVAL;
1429 		goto out;
1430 	}
1431 
1432 	write_lock_bh(&arm_state->susp_res_lock);
1433 	local_uc = ++arm_state->videocore_use_count;
1434 	++(*entity_uc);
1435 
1436 	vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
1437 			*entity_uc, local_uc);
1438 
1439 	write_unlock_bh(&arm_state->susp_res_lock);
1440 
1441 	if (!ret) {
1442 		int status = 0;
1443 		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
1444 
1445 		while (ack_cnt && !status) {
1446 			/* Send the use notify to videocore */
1447 			status = vchiq_send_remote_use_active(state);
1448 			if (!status)
1449 				ack_cnt--;
1450 			else
1451 				atomic_add(ack_cnt, &arm_state->ka_use_ack_count);
1452 		}
1453 	}
1454 
1455 out:
1456 	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
1457 	return ret;
1458 }
1459 
1460 int
vchiq_release_internal(struct vchiq_state * state,struct vchiq_service * service)1461 vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
1462 {
1463 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1464 	int ret = 0;
1465 	char entity[16];
1466 	int *entity_uc;
1467 
1468 	if (!arm_state) {
1469 		ret = -EINVAL;
1470 		goto out;
1471 	}
1472 
1473 	if (service) {
1474 		sprintf(entity, "%c%c%c%c:%03d",
1475 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1476 			service->client_id);
1477 		entity_uc = &service->service_use_count;
1478 	} else {
1479 		sprintf(entity, "PEER:   ");
1480 		entity_uc = &arm_state->peer_use_count;
1481 	}
1482 
1483 	write_lock_bh(&arm_state->susp_res_lock);
1484 	if (!arm_state->videocore_use_count || !(*entity_uc)) {
1485 		/* Don't use BUG_ON - don't allow user thread to crash kernel */
1486 		WARN_ON(!arm_state->videocore_use_count);
1487 		WARN_ON(!(*entity_uc));
1488 		ret = -EINVAL;
1489 		goto unlock;
1490 	}
1491 	--arm_state->videocore_use_count;
1492 	--(*entity_uc);
1493 
1494 	vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
1495 			*entity_uc, arm_state->videocore_use_count);
1496 
1497 unlock:
1498 	write_unlock_bh(&arm_state->susp_res_lock);
1499 
1500 out:
1501 	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
1502 	return ret;
1503 }
1504 
/*
 * Remote (VideoCore) requested a use-count; record it and wake the
 * keepalive thread to forward it.
 */
void
vchiq_on_remote_use(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	atomic_inc(&arm_state->ka_use_count);
	complete(&arm_state->ka_evt);
}
1513 
/*
 * Remote (VideoCore) released a use-count; record it and wake the
 * keepalive thread to forward it.
 */
void
vchiq_on_remote_release(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	atomic_inc(&arm_state->ka_release_count);
	complete(&arm_state->ka_evt);
}
1522 
/* Convenience wrapper: take a service-type use-count for @service. */
int
vchiq_use_service_internal(struct vchiq_service *service)
{
	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}
1528 
/* Convenience wrapper: drop the use-count held for @service. */
int
vchiq_release_service_internal(struct vchiq_service *service)
{
	return vchiq_release_internal(service->state, service);
}
1534 
/* Accessor for the instance's embedded debugfs node. */
struct vchiq_debugfs_node *
vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
{
	return &instance->debugfs_node;
}
1540 
1541 int
vchiq_instance_get_use_count(struct vchiq_instance * instance)1542 vchiq_instance_get_use_count(struct vchiq_instance *instance)
1543 {
1544 	struct vchiq_service *service;
1545 	int use_count = 0, i;
1546 
1547 	i = 0;
1548 	rcu_read_lock();
1549 	while ((service = __next_service_by_instance(instance->state,
1550 						     instance, &i)))
1551 		use_count += service->service_use_count;
1552 	rcu_read_unlock();
1553 	return use_count;
1554 }
1555 
/* Accessor for the pid recorded against the instance. */
int
vchiq_instance_get_pid(struct vchiq_instance *instance)
{
	return instance->pid;
}
1561 
/* Accessor for the instance's trace flag. */
int
vchiq_instance_get_trace(struct vchiq_instance *instance)
{
	return instance->trace;
}
1567 
1568 void
vchiq_instance_set_trace(struct vchiq_instance * instance,int trace)1569 vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
1570 {
1571 	struct vchiq_service *service;
1572 	int i;
1573 
1574 	i = 0;
1575 	rcu_read_lock();
1576 	while ((service = __next_service_by_instance(instance->state,
1577 						     instance, &i)))
1578 		service->trace = trace;
1579 	rcu_read_unlock();
1580 	instance->trace = (trace != 0);
1581 }
1582 
1583 int
vchiq_use_service(struct vchiq_instance * instance,unsigned int handle)1584 vchiq_use_service(struct vchiq_instance *instance, unsigned int handle)
1585 {
1586 	int ret = -EINVAL;
1587 	struct vchiq_service *service = find_service_by_handle(instance, handle);
1588 
1589 	if (service) {
1590 		ret = vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
1591 		vchiq_service_put(service);
1592 	}
1593 	return ret;
1594 }
1595 EXPORT_SYMBOL(vchiq_use_service);
1596 
1597 int
vchiq_release_service(struct vchiq_instance * instance,unsigned int handle)1598 vchiq_release_service(struct vchiq_instance *instance, unsigned int handle)
1599 {
1600 	int ret = -EINVAL;
1601 	struct vchiq_service *service = find_service_by_handle(instance, handle);
1602 
1603 	if (service) {
1604 		ret = vchiq_release_internal(service->state, service);
1605 		vchiq_service_put(service);
1606 	}
1607 	return ret;
1608 }
1609 EXPORT_SYMBOL(vchiq_release_service);
1610 
/* Snapshot of one service's identity and use-count for dumping. */
struct service_data_struct {
	int fourcc;	/* service fourcc code */
	int clientid;	/* client id of the service */
	int use_count;	/* outstanding use-count at snapshot time */
};
1616 
/*
 * Log the use-count of every active service plus the aggregate peer
 * and VideoCore counts — used to diagnose what is preventing suspend.
 * Snapshots the data under susp_res_lock/RCU, then logs outside them.
 */
void
vchiq_dump_service_use_state(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct service_data_struct *service_data;
	int i, found = 0;
	/*
	 * If there's more than 64 services, only dump ones with
	 * non-zero counts
	 */
	int only_nonzero = 0;
	static const char *nz = "<-- preventing suspend";

	int peer_count;
	int vc_use_count;
	int active_services;

	if (!arm_state)
		return;

	service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
				     GFP_KERNEL);
	if (!service_data)
		return;

	read_lock_bh(&arm_state->susp_res_lock);
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > MAX_SERVICES)
		only_nonzero = 1;

	/* Snapshot per-service counts into service_data under RCU. */
	rcu_read_lock();
	for (i = 0; i < active_services; i++) {
		struct vchiq_service *service_ptr =
			rcu_dereference(state->services[i]);

		if (!service_ptr)
			continue;

		if (only_nonzero && !service_ptr->service_use_count)
			continue;

		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
			continue;

		service_data[found].fourcc = service_ptr->base.fourcc;
		service_data[found].clientid = service_ptr->client_id;
		service_data[found].use_count = service_ptr->service_use_count;
		found++;
		if (found >= MAX_SERVICES)
			break;
	}
	rcu_read_unlock();

	read_unlock_bh(&arm_state->susp_res_lock);

	/* Logging happens after the locks are dropped. */
	if (only_nonzero)
		vchiq_log_warning(vchiq_susp_log_level, "Too many active services (%d). Only dumping up to first %d services with non-zero use-count",
				  active_services, found);

	for (i = 0; i < found; i++) {
		vchiq_log_warning(vchiq_susp_log_level, "----- %c%c%c%c:%d service count %d %s",
				  VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
				  service_data[i].clientid, service_data[i].use_count,
				  service_data[i].use_count ? nz : "");
	}
	vchiq_log_warning(vchiq_susp_log_level, "----- VCHIQ use count %d", peer_count);
	vchiq_log_warning(vchiq_susp_log_level, "--- Overall vchiq instance use count %d",
			  vc_use_count);

	kfree(service_data);
}
1690 
1691 int
vchiq_check_service(struct vchiq_service * service)1692 vchiq_check_service(struct vchiq_service *service)
1693 {
1694 	struct vchiq_arm_state *arm_state;
1695 	int ret = -EINVAL;
1696 
1697 	if (!service || !service->state)
1698 		goto out;
1699 
1700 	arm_state = vchiq_platform_get_arm_state(service->state);
1701 
1702 	read_lock_bh(&arm_state->susp_res_lock);
1703 	if (service->service_use_count)
1704 		ret = 0;
1705 	read_unlock_bh(&arm_state->susp_res_lock);
1706 
1707 	if (ret) {
1708 		vchiq_log_error(vchiq_susp_log_level,
1709 				"%s ERROR - %c%c%c%c:%d service count %d, state count %d", __func__,
1710 				VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc), service->client_id,
1711 				service->service_use_count, arm_state->videocore_use_count);
1712 		vchiq_dump_service_use_state(service->state);
1713 	}
1714 out:
1715 	return ret;
1716 }
1717 
vchiq_platform_conn_state_changed(struct vchiq_state * state,enum vchiq_connstate oldstate,enum vchiq_connstate newstate)1718 void vchiq_platform_conn_state_changed(struct vchiq_state *state,
1719 				       enum vchiq_connstate oldstate,
1720 				       enum vchiq_connstate newstate)
1721 {
1722 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1723 	char threadname[16];
1724 
1725 	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
1726 		       get_conn_state_name(oldstate), get_conn_state_name(newstate));
1727 	if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
1728 		return;
1729 
1730 	write_lock_bh(&arm_state->susp_res_lock);
1731 	if (arm_state->first_connect) {
1732 		write_unlock_bh(&arm_state->susp_res_lock);
1733 		return;
1734 	}
1735 
1736 	arm_state->first_connect = 1;
1737 	write_unlock_bh(&arm_state->susp_res_lock);
1738 	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
1739 		 state->id);
1740 	arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
1741 					      (void *)state,
1742 					      threadname);
1743 	if (IS_ERR(arm_state->ka_thread)) {
1744 		vchiq_log_error(vchiq_susp_log_level,
1745 				"vchiq: FATAL: couldn't create thread %s",
1746 				threadname);
1747 	} else {
1748 		wake_up_process(arm_state->ka_thread);
1749 	}
1750 }
1751 
/* Device-tree match table; .data selects per-SoC driver data. */
static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_drvdata },
	{ .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_drvdata },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);
1758 
1759 static struct platform_device *
vchiq_register_child(struct platform_device * pdev,const char * name)1760 vchiq_register_child(struct platform_device *pdev, const char *name)
1761 {
1762 	struct platform_device_info pdevinfo;
1763 	struct platform_device *child;
1764 
1765 	memset(&pdevinfo, 0, sizeof(pdevinfo));
1766 
1767 	pdevinfo.parent = &pdev->dev;
1768 	pdevinfo.name = name;
1769 	pdevinfo.id = PLATFORM_DEVID_NONE;
1770 	pdevinfo.dma_mask = DMA_BIT_MASK(32);
1771 
1772 	child = platform_device_register_full(&pdevinfo);
1773 	if (IS_ERR(child)) {
1774 		dev_warn(&pdev->dev, "%s not registered\n", name);
1775 		child = NULL;
1776 	}
1777 
1778 	return child;
1779 }
1780 
vchiq_probe(struct platform_device * pdev)1781 static int vchiq_probe(struct platform_device *pdev)
1782 {
1783 	struct device_node *fw_node;
1784 	const struct of_device_id *of_id;
1785 	struct vchiq_drvdata *drvdata;
1786 	int err;
1787 
1788 	of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
1789 	drvdata = (struct vchiq_drvdata *)of_id->data;
1790 	if (!drvdata)
1791 		return -EINVAL;
1792 
1793 	fw_node = of_find_compatible_node(NULL, NULL,
1794 					  "raspberrypi,bcm2835-firmware");
1795 	if (!fw_node) {
1796 		dev_err(&pdev->dev, "Missing firmware node\n");
1797 		return -ENOENT;
1798 	}
1799 
1800 	drvdata->fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
1801 	of_node_put(fw_node);
1802 	if (!drvdata->fw)
1803 		return -EPROBE_DEFER;
1804 
1805 	platform_set_drvdata(pdev, drvdata);
1806 
1807 	err = vchiq_platform_init(pdev, &g_state);
1808 	if (err)
1809 		goto failed_platform_init;
1810 
1811 	vchiq_debugfs_init();
1812 
1813 	vchiq_log_info(vchiq_arm_log_level,
1814 		       "vchiq: platform initialised - version %d (min %d)",
1815 		       VCHIQ_VERSION, VCHIQ_VERSION_MIN);
1816 
1817 	/*
1818 	 * Simply exit on error since the function handles cleanup in
1819 	 * cases of failure.
1820 	 */
1821 	err = vchiq_register_chrdev(&pdev->dev);
1822 	if (err) {
1823 		vchiq_log_warning(vchiq_arm_log_level,
1824 				  "Failed to initialize vchiq cdev");
1825 		goto error_exit;
1826 	}
1827 
1828 	bcm2835_camera = vchiq_register_child(pdev, "bcm2835-camera");
1829 	bcm2835_audio = vchiq_register_child(pdev, "bcm2835_audio");
1830 
1831 	return 0;
1832 
1833 failed_platform_init:
1834 	vchiq_log_warning(vchiq_arm_log_level, "could not initialize vchiq platform");
1835 error_exit:
1836 	return err;
1837 }
1838 
/* Platform remove: tear down children, debugfs and the char device. */
static void vchiq_remove(struct platform_device *pdev)
{
	platform_device_unregister(bcm2835_audio);
	platform_device_unregister(bcm2835_camera);
	vchiq_debugfs_deinit();
	vchiq_deregister_chrdev();
}
1846 
/* Platform driver glue, matched via the OF table above. */
static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove_new = vchiq_remove,
};
1855 
/* Module init: register the platform driver, logging on failure. */
static int __init vchiq_driver_init(void)
{
	int ret;

	ret = platform_driver_register(&vchiq_driver);
	if (ret)
		pr_err("Failed to register vchiq driver\n");

	return ret;
}
module_init(vchiq_driver_init);
1867 
/* Module exit: unregister the platform driver. */
static void __exit vchiq_driver_exit(void)
{
	platform_driver_unregister(&vchiq_driver);
}
module_exit(vchiq_driver_exit);
1873 
/* Module metadata. */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");
1877