1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
4  * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5  */
6 
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/sched/signal.h>
10 #include <linux/types.h>
11 #include <linux/errno.h>
12 #include <linux/cdev.h>
13 #include <linux/fs.h>
14 #include <linux/device.h>
15 #include <linux/mm.h>
16 #include <linux/highmem.h>
17 #include <linux/pagemap.h>
18 #include <linux/bug.h>
19 #include <linux/completion.h>
20 #include <linux/list.h>
21 #include <linux/of.h>
22 #include <linux/platform_device.h>
23 #include <linux/compat.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/rcupdate.h>
26 #include <linux/delay.h>
27 #include <linux/slab.h>
28 #include <soc/bcm2835/raspberrypi-firmware.h>
29 
30 #include "vchiq_core.h"
31 #include "vchiq_ioctl.h"
32 #include "vchiq_arm.h"
33 #include "vchiq_debugfs.h"
34 
35 #define DEVICE_NAME "vchiq"
36 
37 /* Override the default prefix, which would be vchiq_arm (from the filename) */
38 #undef MODULE_PARAM_PREFIX
39 #define MODULE_PARAM_PREFIX DEVICE_NAME "."
40 
/* Some per-instance constants */
#define MAX_COMPLETIONS 128	/* completion ring entries; power of two (masked below) */
#define MAX_SERVICES 64
#define MAX_ELEMENTS 8
#define MSG_QUEUE_SIZE 128	/* per-service msg ring entries; power of two (masked below) */

#define KEEPALIVE_VER 1
#define KEEPALIVE_VER_MIN KEEPALIVE_VER

/* Run time control of log level, based on KERN_XXX level. */
int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
53 
/*
 * Per-service state for a service opened through the userspace (ioctl)
 * interface.  For "vchi" services, incoming message headers are buffered
 * in msg_queue, a power-of-two ring indexed by the free-running
 * msg_insert/msg_remove counters (protected by msg_queue_spinlock) and
 * drained by the DEQUEUE_MESSAGE ioctl.
 */
struct user_service {
	struct vchiq_service *service;	/* underlying core service */
	void __user *userdata;		/* opaque cookie supplied by userspace */
	struct vchiq_instance *instance; /* owning instance (per open fd) */
	char is_vchi;			/* non-zero: buffer messages in msg_queue */
	char dequeue_pending;		/* a thread is waiting in DEQUEUE_MESSAGE */
	char close_pending;		/* CLOSED completion awaiting CLOSE_DELIVERED ack */
	int message_available_pos;	/* completion-ring position of the last
					 * MESSAGE_AVAILABLE queued for this service */
	int msg_insert;			/* msg ring write index (free-running) */
	int msg_remove;			/* msg ring read index (free-running) */
	struct completion insert_event;	/* signalled when a message is queued */
	struct completion remove_event;	/* signalled when a message is dequeued */
	struct completion close_event;	/* signalled when CLOSED is delivered */
	struct vchiq_header *msg_queue[MSG_QUEUE_SIZE];
};
69 
/*
 * A bulk_waiter plus the linkage used to park it on
 * vchiq_instance.bulk_waiter_list when a blocking bulk transfer is
 * interrupted; entries are found again by the owning thread's pid.
 */
struct bulk_waiter_node {
	struct bulk_waiter bulk_waiter;
	int pid;			/* pid of the thread that owns the transfer */
	struct list_head list;
};
75 
/*
 * Per-open-file (or per-kernel-client) state.  Completion records
 * produced by service callbacks are buffered in 'completions', a
 * MAX_COMPLETIONS-entry ring indexed by the free-running
 * completion_insert/completion_remove counters and drained by the
 * AWAIT_COMPLETION ioctl.
 */
struct vchiq_instance {
	struct vchiq_state *state;	/* global connection state */
	struct vchiq_completion_data_kernel completions[MAX_COMPLETIONS];
	int completion_insert;		/* ring write index (free-running) */
	int completion_remove;		/* ring read index (free-running) */
	struct completion insert_event;	/* signalled after each insert */
	struct completion remove_event;	/* waited on by add_completion() when ring full */
	struct mutex completion_mutex;	/* serialises AWAIT_COMPLETION processing */

	int connected;			/* vchiq_connect() has succeeded */
	int closing;			/* instance is shutting down; drop new work */
	int pid;			/* pid of the opener */
	int mark;
	int use_close_delivered;	/* userspace acks CLOSED via CLOSE_DELIVERED */
	int trace;

	/* bulk waiters parked by interrupted blocking transfers, keyed by pid */
	struct list_head bulk_waiter_list;
	struct mutex bulk_waiter_list_mutex;

	struct vchiq_debugfs_node debugfs_node;
};
97 
/*
 * State threaded through the state-dump helpers.
 * NOTE(review): usage is outside this chunk — field meanings below are
 * inferred from the names; confirm against the dump implementation.
 */
struct dump_context {
	char __user *buf;	/* userspace destination buffer */
	size_t actual;		/* bytes produced so far (presumably) */
	size_t space;		/* space remaining in buf (presumably) */
	loff_t offset;		/* read offset into the dump */
};
104 
static struct cdev    vchiq_cdev;	/* character device backing /dev/vchiq */
static dev_t          vchiq_devid;
static struct vchiq_state g_state;	/* the single global vchiq connection state */
static struct class  *vchiq_class;
/* Protects every user_service msg_queue ring and its indices */
static DEFINE_SPINLOCK(msg_queue_spinlock);
static struct platform_device *bcm2835_camera;
static struct platform_device *bcm2835_audio;

/*
 * Per-SoC DMA cache line sizes; presumably selected as OF match data in
 * the platform probe — confirm against the of_device_id table.
 */
static struct vchiq_drvdata bcm2835_drvdata = {
	.cache_line_size = 32,
};

static struct vchiq_drvdata bcm2836_drvdata = {
	.cache_line_size = 64,
};
120 
/*
 * Human-readable ioctl names for trace logging; order must match the
 * VCHIQ_IOC_* numbering (checked by the static assert below).
 */
static const char *const ioctl_names[] = {
	"CONNECT",
	"SHUTDOWN",
	"CREATE_SERVICE",
	"REMOVE_SERVICE",
	"QUEUE_MESSAGE",
	"QUEUE_BULK_TRANSMIT",
	"QUEUE_BULK_RECEIVE",
	"AWAIT_COMPLETION",
	"DEQUEUE_MESSAGE",
	"GET_CLIENT_ID",
	"GET_CONFIG",
	"CLOSE_SERVICE",
	"USE_SERVICE",
	"RELEASE_SERVICE",
	"SET_SERVICE_OPTION",
	"DUMP_PHYS_MEM",
	"LIB_VERSION",
	"CLOSE_DELIVERED"
};

/* One name per ioctl, no more, no fewer */
vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
		    (VCHIQ_IOC_MAX + 1));

static enum vchiq_status
vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
	unsigned int size, enum vchiq_bulk_dir dir);
148 
#define VCHIQ_INIT_RETRIES 10
/*
 * Allocate a vchiq instance bound to the global state.
 *
 * Polls vchiq_get_state() up to VCHIQ_INIT_RETRIES times, sleeping
 * 500-600us between attempts, because VideoCore may not be up yet at
 * boot time (and may never come up if kernel and firmware mismatch).
 *
 * On success stores the new instance in *instance_out and returns
 * VCHIQ_SUCCESS; on timeout or allocation failure returns VCHIQ_ERROR.
 */
enum vchiq_status vchiq_initialise(struct vchiq_instance **instance_out)
{
	enum vchiq_status status = VCHIQ_ERROR;
	struct vchiq_state *state;
	struct vchiq_instance *instance = NULL;
	int i;

	vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);

	/*
	 * VideoCore may not be ready due to boot up timing.
	 * It may never be ready if kernel and firmware are mismatched,so don't
	 * block forever.
	 */
	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
		state = vchiq_get_state();
		if (state)
			break;
		usleep_range(500, 600);
	}
	if (i == VCHIQ_INIT_RETRIES) {
		vchiq_log_error(vchiq_core_log_level,
			"%s: videocore not initialized\n", __func__);
		goto failed;
	} else if (i > 0) {
		vchiq_log_warning(vchiq_core_log_level,
			"%s: videocore initialized after %d retries\n",
			__func__, i);
	}

	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance) {
		vchiq_log_error(vchiq_core_log_level,
			"%s: error allocating vchiq instance\n", __func__);
		goto failed;
	}

	instance->connected = 0;
	instance->state = state;
	mutex_init(&instance->bulk_waiter_list_mutex);
	INIT_LIST_HEAD(&instance->bulk_waiter_list);

	*instance_out = instance;

	status = VCHIQ_SUCCESS;

failed:
	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_initialise);
203 
/*
 * Shut down a vchiq instance: removes all of its services (under the
 * state mutex), then — only on success — frees any bulk_waiter nodes
 * parked on the instance by interrupted blocking transfers, and finally
 * the instance itself.
 *
 * Returns VCHIQ_RETRY if taking the state mutex was interrupted by a
 * fatal signal, otherwise the result of vchiq_shutdown_internal().
 */
enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance)
{
	enum vchiq_status status;
	struct vchiq_state *state = instance->state;

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p) called", __func__, instance);

	if (mutex_lock_killable(&state->mutex))
		return VCHIQ_RETRY;

	/* Remove all services */
	status = vchiq_shutdown_internal(state, instance);

	mutex_unlock(&state->mutex);

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p): returning %d", __func__, instance, status);

	if (status == VCHIQ_SUCCESS) {
		struct bulk_waiter_node *waiter, *next;

		/* Reap waiters left behind by interrupted blocking transfers */
		list_for_each_entry_safe(waiter, next,
					 &instance->bulk_waiter_list, list) {
			list_del(&waiter->list);
			vchiq_log_info(vchiq_arm_log_level,
					"bulk_waiter - cleaned up %pK for pid %d",
					waiter, waiter->pid);
			kfree(waiter);
		}
		kfree(instance);
	}

	return status;
}
EXPORT_SYMBOL(vchiq_shutdown);
240 
241 static int vchiq_is_connected(struct vchiq_instance *instance)
242 {
243 	return instance->connected;
244 }
245 
246 enum vchiq_status vchiq_connect(struct vchiq_instance *instance)
247 {
248 	enum vchiq_status status;
249 	struct vchiq_state *state = instance->state;
250 
251 	vchiq_log_trace(vchiq_core_log_level,
252 		"%s(%p) called", __func__, instance);
253 
254 	if (mutex_lock_killable(&state->mutex)) {
255 		vchiq_log_trace(vchiq_core_log_level,
256 			"%s: call to mutex_lock failed", __func__);
257 		status = VCHIQ_RETRY;
258 		goto failed;
259 	}
260 	status = vchiq_connect_internal(state, instance);
261 
262 	if (status == VCHIQ_SUCCESS)
263 		instance->connected = 1;
264 
265 	mutex_unlock(&state->mutex);
266 
267 failed:
268 	vchiq_log_trace(vchiq_core_log_level,
269 		"%s(%p): returning %d", __func__, instance, status);
270 
271 	return status;
272 }
273 EXPORT_SYMBOL(vchiq_connect);
274 
275 static enum vchiq_status vchiq_add_service(
276 	struct vchiq_instance             *instance,
277 	const struct vchiq_service_params_kernel *params,
278 	unsigned int       *phandle)
279 {
280 	enum vchiq_status status;
281 	struct vchiq_state *state = instance->state;
282 	struct vchiq_service *service = NULL;
283 	int srvstate;
284 
285 	vchiq_log_trace(vchiq_core_log_level,
286 		"%s(%p) called", __func__, instance);
287 
288 	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
289 
290 	srvstate = vchiq_is_connected(instance)
291 		? VCHIQ_SRVSTATE_LISTENING
292 		: VCHIQ_SRVSTATE_HIDDEN;
293 
294 	service = vchiq_add_service_internal(
295 		state,
296 		params,
297 		srvstate,
298 		instance,
299 		NULL);
300 
301 	if (service) {
302 		*phandle = service->handle;
303 		status = VCHIQ_SUCCESS;
304 	} else
305 		status = VCHIQ_ERROR;
306 
307 	vchiq_log_trace(vchiq_core_log_level,
308 		"%s(%p): returning %d", __func__, instance, status);
309 
310 	return status;
311 }
312 
/*
 * Create a service in the OPENING state and perform the open handshake
 * with the peer.  The instance must already be connected.  On success
 * *phandle receives the service handle; on failure the half-created
 * service is removed and *phandle is left VCHIQ_SERVICE_HANDLE_INVALID.
 */
enum vchiq_status vchiq_open_service(
	struct vchiq_instance             *instance,
	const struct vchiq_service_params_kernel *params,
	unsigned int       *phandle)
{
	enum vchiq_status   status = VCHIQ_ERROR;
	struct vchiq_state   *state = instance->state;
	struct vchiq_service *service = NULL;

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p) called", __func__, instance);

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	if (!vchiq_is_connected(instance))
		goto failed;

	service = vchiq_add_service_internal(state,
		params,
		VCHIQ_SRVSTATE_OPENING,
		instance,
		NULL);

	if (service) {
		*phandle = service->handle;
		status = vchiq_open_service_internal(service, current->pid);
		if (status != VCHIQ_SUCCESS) {
			/* Open handshake failed - tear the service down again */
			vchiq_remove_service(service->handle);
			*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
		}
	}

failed:
	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_open_service);
352 
/*
 * Queue a bulk transmit of @size bytes from @data on @handle.
 *
 * NOCALLBACK/CALLBACK modes queue the transfer directly; BLOCKING mode
 * waits for completion via vchiq_blocking_bulk_transfer().  VCHIQ_RETRY
 * from the lower layers is retried here (with a 1 ms sleep) because
 * this entry point is supposed to block until the transfer is queued.
 * Unknown modes return VCHIQ_ERROR.
 */
enum vchiq_status
vchiq_bulk_transmit(unsigned int handle, const void *data,
	unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
{
	enum vchiq_status status;

	while (1) {
		switch (mode) {
		case VCHIQ_BULK_MODE_NOCALLBACK:
		case VCHIQ_BULK_MODE_CALLBACK:
			/* Cast drops const: the transfer API takes void * */
			status = vchiq_bulk_transfer(handle,
						     (void *)data, NULL,
						     size, userdata, mode,
						     VCHIQ_BULK_TRANSMIT);
			break;
		case VCHIQ_BULK_MODE_BLOCKING:
			status = vchiq_blocking_bulk_transfer(handle,
				(void *)data, size, VCHIQ_BULK_TRANSMIT);
			break;
		default:
			return VCHIQ_ERROR;
		}

		/*
		 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
		 * to implement a retry mechanism since this function is
		 * supposed to block until queued
		 */
		if (status != VCHIQ_RETRY)
			break;

		msleep(1);
	}

	return status;
}
EXPORT_SYMBOL(vchiq_bulk_transmit);
390 
391 enum vchiq_status vchiq_bulk_receive(unsigned int handle, void *data,
392 				     unsigned int size, void *userdata,
393 				     enum vchiq_bulk_mode mode)
394 {
395 	enum vchiq_status status;
396 
397 	while (1) {
398 		switch (mode) {
399 		case VCHIQ_BULK_MODE_NOCALLBACK:
400 		case VCHIQ_BULK_MODE_CALLBACK:
401 			status = vchiq_bulk_transfer(handle, data, NULL,
402 						     size, userdata,
403 						     mode, VCHIQ_BULK_RECEIVE);
404 			break;
405 		case VCHIQ_BULK_MODE_BLOCKING:
406 			status = vchiq_blocking_bulk_transfer(handle,
407 				(void *)data, size, VCHIQ_BULK_RECEIVE);
408 			break;
409 		default:
410 			return VCHIQ_ERROR;
411 		}
412 
413 		/*
414 		 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
415 		 * to implement a retry mechanism since this function is
416 		 * supposed to block until queued
417 		 */
418 		if (status != VCHIQ_RETRY)
419 			break;
420 
421 		msleep(1);
422 	}
423 
424 	return status;
425 }
426 EXPORT_SYMBOL(vchiq_bulk_receive);
427 
/*
 * Perform a blocking bulk transfer on behalf of the current thread.
 *
 * If a previous blocking transfer by this pid was interrupted, its
 * bulk_waiter_node was parked on instance->bulk_waiter_list; it is
 * reclaimed here so the still-outstanding transfer can be resumed
 * rather than re-queued.  If this attempt is itself interrupted
 * (VCHIQ_RETRY with no fatal signal and a bulk still outstanding) the
 * waiter is parked again; otherwise it is freed before returning.
 */
static enum vchiq_status
vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
	unsigned int size, enum vchiq_bulk_dir dir)
{
	struct vchiq_instance *instance;
	struct vchiq_service *service;
	enum vchiq_status status;
	struct bulk_waiter_node *waiter = NULL;
	bool found = false;

	service = find_service_by_handle(handle);
	if (!service)
		return VCHIQ_ERROR;

	instance = service->instance;

	unlock_service(service);

	/* Look for a waiter previously parked by this thread (by pid) */
	mutex_lock(&instance->bulk_waiter_list_mutex);
	list_for_each_entry(waiter, &instance->bulk_waiter_list, list) {
		if (waiter->pid == current->pid) {
			list_del(&waiter->list);
			found = true;
			break;
		}
	}
	mutex_unlock(&instance->bulk_waiter_list_mutex);

	if (found) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* This thread has an outstanding bulk transfer. */
			/* FIXME: why compare a dma address to a pointer? */
			if ((bulk->data != (dma_addr_t)(uintptr_t)data) ||
				(bulk->size != size)) {
				/*
				 * This is not a retry of the previous one.
				 * Cancel the signal when the transfer completes.
				 */
				spin_lock(&bulk_waiter_spinlock);
				bulk->userdata = NULL;
				spin_unlock(&bulk_waiter_spinlock);
			}
		}
	} else {
		waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
		if (!waiter) {
			vchiq_log_error(vchiq_core_log_level,
				"%s - out of memory", __func__);
			return VCHIQ_ERROR;
		}
	}

	status = vchiq_bulk_transfer(handle, data, NULL, size,
				     &waiter->bulk_waiter,
				     VCHIQ_BULK_MODE_BLOCKING, dir);
	if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
		!waiter->bulk_waiter.bulk) {
		/* Transfer finished (or is unrecoverable): drop the waiter */
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* Cancel the signal when the transfer completes. */
			spin_lock(&bulk_waiter_spinlock);
			bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		kfree(waiter);
	} else {
		/* Interrupted with transfer outstanding: park for a retry */
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		vchiq_log_info(vchiq_arm_log_level,
				"saved bulk_waiter %pK for pid %d",
				waiter, current->pid);
	}

	return status;
}
508 /****************************************************************************
509  *
510  *   add_completion
511  *
512  ***************************************************************************/
513 
/*
 * Append a completion record to the instance's completion ring for
 * delivery via AWAIT_COMPLETION.
 *
 * If the MAX_COMPLETIONS-entry ring is full, wait (interruptibly) for
 * userspace to consume entries.  Returns VCHIQ_RETRY if that wait is
 * interrupted, otherwise VCHIQ_SUCCESS (including when the instance is
 * closing, in which case the completion is silently dropped).
 */
static enum vchiq_status
add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
	       struct vchiq_header *header, struct user_service *user_service,
	       void *bulk_userdata)
{
	struct vchiq_completion_data_kernel *completion;
	int insert;

	DEBUG_INITIALISE(g_state.local)

	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level,
			"%s - completion queue full", __func__);
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (wait_for_completion_interruptible(
					&instance->remove_event)) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback interrupted");
			return VCHIQ_RETRY;
		} else if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback closing");
			return VCHIQ_SUCCESS;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	/* Free-running index masked into the power-of-two ring */
	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/*
		 * Take an extra reference, to be held until
		 * this CLOSED notification is delivered.
		 */
		lock_service(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/*
	 * A write barrier is needed here to ensure that the entire completion
	 * record is written out before the insert point.
	 */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	complete(&instance->insert_event);

	return VCHIQ_SUCCESS;
}
578 
579 /****************************************************************************
580  *
581  *   service_callback
582  *
583  ***************************************************************************/
584 
/*
 * Core-to-userspace callback for services created via the ioctl
 * interface.  For vchi services the message header is buffered in the
 * per-service msg_queue ring; a MESSAGE_AVAILABLE completion is only
 * queued if one is not already pending and no thread is waiting in
 * DEQUEUE_MESSAGE.  All other reasons fall through to add_completion().
 */
static enum vchiq_status
service_callback(enum vchiq_reason reason, struct vchiq_header *header,
		 unsigned int handle, void *bulk_userdata)
{
	/*
	 * How do we ensure the callback goes to the right client?
	 * The service_user data points to a user_service record
	 * containing the original callback and the user state structure, which
	 * contains a circular buffer for completion records.
	 */
	struct user_service *user_service;
	struct vchiq_service *service;
	struct vchiq_instance *instance;
	bool skip_completion = false;

	DEBUG_INITIALISE(g_state.local)

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	service = handle_to_service(handle);
	BUG_ON(!service);
	user_service = (struct user_service *)service->base.userdata;
	instance = user_service->instance;

	/* Drop callbacks once the instance is (being) shut down */
	if (!instance || instance->closing)
		return VCHIQ_SUCCESS;

	vchiq_log_trace(vchiq_arm_log_level,
		"%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
		__func__, (unsigned long)user_service,
		service->localport, user_service->userdata,
		reason, (unsigned long)header,
		(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		/* Wait for space in the message ring, re-taking the lock
		 * each time round */
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level,
				"service_callback - msg queue full");
			/*
			 * If there is no MESSAGE_AVAILABLE in the completion
			 * queue, add one
			 */
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				enum vchiq_status status;

				vchiq_log_info(vchiq_arm_log_level,
					"Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason,
					NULL, user_service, bulk_userdata);
				if (status != VCHIQ_SUCCESS) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			if (wait_for_completion_interruptible(
						&user_service->remove_event)) {
				vchiq_log_info(vchiq_arm_log_level,
					"%s interrupted", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_RETRY;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level,
					"%s closing", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_ERROR;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		/* Free-running index masked into the power-of-two ring */
		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/*
		 * If there is a thread waiting in DEQUEUE_MESSAGE, or if
		 * there is a MESSAGE_AVAILABLE in the completion queue then
		 * bypass the completion queue.
		 */
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = true;
		}

		spin_unlock(&msg_queue_spinlock);
		complete(&user_service->insert_event);

		/* The header is now owned by the msg_queue, not a completion */
		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	if (skip_completion)
		return VCHIQ_SUCCESS;

	return add_completion(instance, reason, header, user_service,
		bulk_userdata);
}
693 
694 /****************************************************************************
695  *
696  *   user_service_free
697  *
698  ***************************************************************************/
/*
 * userdata_term callback installed on services created by the ioctl
 * interface: releases the user_service allocation when the underlying
 * service is destroyed.
 */
static void
user_service_free(void *userdata)
{
	struct user_service *user_service = userdata;

	kfree(user_service);
}
704 
705 /****************************************************************************
706  *
707  *   close_delivered
708  *
709  ***************************************************************************/
710 static void close_delivered(struct user_service *user_service)
711 {
712 	vchiq_log_info(vchiq_arm_log_level,
713 		"%s(handle=%x)",
714 		__func__, user_service->service->handle);
715 
716 	if (user_service->close_pending) {
717 		/* Allow the underlying service to be culled */
718 		unlock_service(user_service->service);
719 
720 		/* Wake the user-thread blocked in close_ or remove_service */
721 		complete(&user_service->close_event);
722 
723 		user_service->close_pending = 0;
724 	}
725 }
726 
/*
 * Cursor state for vchiq_ioc_copy_element_data(): tracks progress
 * through an array of vchiq_elements across successive copy callbacks.
 */
struct vchiq_io_copy_callback_context {
	struct vchiq_element *element;	/* current element being copied */
	size_t element_offset;		/* bytes already copied from it */
	unsigned long elements_to_go;	/* elements remaining, incl. current */
};
732 
/*
 * Copy callback used with vchiq_queue_message(): gathers message
 * payload from a user-supplied array of vchiq_elements into @dest,
 * up to @maxsize bytes per call, advancing through the element array
 * across calls via the persistent @context cursor.
 *
 * Returns the number of bytes copied (short only when the elements are
 * exhausted), or -EFAULT if a userspace copy faults.
 *
 * NOTE(review): the @offset parameter is unused here — this assumes the
 * caller always consumes the data sequentially; confirm against
 * vchiq_queue_message()'s contract.
 */
static ssize_t vchiq_ioc_copy_element_data(void *context, void *dest,
					   size_t offset, size_t maxsize)
{
	struct vchiq_io_copy_callback_context *cc = context;
	size_t total_bytes_copied = 0;
	size_t bytes_this_round;

	while (total_bytes_copied < maxsize) {
		if (!cc->elements_to_go)
			return total_bytes_copied;

		/* Skip empty elements */
		if (!cc->element->size) {
			cc->elements_to_go--;
			cc->element++;
			cc->element_offset = 0;
			continue;
		}

		bytes_this_round = min(cc->element->size - cc->element_offset,
				       maxsize - total_bytes_copied);

		if (copy_from_user(dest + total_bytes_copied,
				  cc->element->data + cc->element_offset,
				  bytes_this_round))
			return -EFAULT;

		cc->element_offset += bytes_this_round;
		total_bytes_copied += bytes_this_round;

		/* Current element fully consumed - advance to the next */
		if (cc->element_offset == cc->element->size) {
			cc->elements_to_go--;
			cc->element++;
			cc->element_offset = 0;
		}
	}

	return maxsize;
}
771 
772 /**************************************************************************
773  *
774  *   vchiq_ioc_queue_message
775  *
776  **************************************************************************/
777 static int
778 vchiq_ioc_queue_message(unsigned int handle,
779 			struct vchiq_element *elements,
780 			unsigned long count)
781 {
782 	struct vchiq_io_copy_callback_context context;
783 	enum vchiq_status status = VCHIQ_SUCCESS;
784 	unsigned long i;
785 	size_t total_size = 0;
786 
787 	context.element = elements;
788 	context.element_offset = 0;
789 	context.elements_to_go = count;
790 
791 	for (i = 0; i < count; i++) {
792 		if (!elements[i].data && elements[i].size != 0)
793 			return -EFAULT;
794 
795 		total_size += elements[i].size;
796 	}
797 
798 	status = vchiq_queue_message(handle, vchiq_ioc_copy_element_data,
799 				     &context, total_size);
800 
801 	if (status == VCHIQ_ERROR)
802 		return -EIO;
803 	else if (status == VCHIQ_RETRY)
804 		return -EINTR;
805 	return 0;
806 }
807 
/*
 * Handle VCHIQ_IOC_CREATE_SERVICE: allocate a user_service wrapper,
 * create the underlying service (OPENING when args->is_open, otherwise
 * LISTENING/HIDDEN depending on connection state) and, for opens, run
 * the open handshake.  On success args->handle receives the service
 * handle.
 *
 * Ownership: once vchiq_add_service_internal() succeeds, user_service
 * is freed by user_service_free() (installed as the userdata_term
 * callback) when the service is destroyed — which is why the
 * open-failure path only calls vchiq_remove_service().
 */
static int vchiq_ioc_create_service(struct vchiq_instance *instance,
				    struct vchiq_create_service *args)
{
	struct user_service *user_service = NULL;
	struct vchiq_service *service;
	enum vchiq_status status = VCHIQ_SUCCESS;
	struct vchiq_service_params_kernel params;
	int srvstate;

	user_service = kmalloc(sizeof(*user_service), GFP_KERNEL);
	if (!user_service)
		return -ENOMEM;

	if (args->is_open) {
		/* Opening requires a connected instance */
		if (!instance->connected) {
			kfree(user_service);
			return -ENOTCONN;
		}
		srvstate = VCHIQ_SRVSTATE_OPENING;
	} else {
		srvstate = instance->connected ?
			 VCHIQ_SRVSTATE_LISTENING : VCHIQ_SRVSTATE_HIDDEN;
	}

	params = (struct vchiq_service_params_kernel) {
		.fourcc   = args->params.fourcc,
		.callback = service_callback,
		.userdata = user_service,
		.version  = args->params.version,
		.version_min = args->params.version_min,
	};
	service = vchiq_add_service_internal(instance->state, &params,
					     srvstate, instance,
					     user_service_free);
	if (!service) {
		kfree(user_service);
		return -EEXIST;
	}

	user_service->service = service;
	user_service->userdata = args->params.userdata;
	user_service->instance = instance;
	user_service->is_vchi = (args->is_vchi != 0);
	user_service->dequeue_pending = 0;
	user_service->close_pending = 0;
	/* No MESSAGE_AVAILABLE pending yet for this service */
	user_service->message_available_pos = instance->completion_remove - 1;
	user_service->msg_insert = 0;
	user_service->msg_remove = 0;
	init_completion(&user_service->insert_event);
	init_completion(&user_service->remove_event);
	init_completion(&user_service->close_event);

	if (args->is_open) {
		status = vchiq_open_service_internal(service, instance->pid);
		if (status != VCHIQ_SUCCESS) {
			/* user_service is freed via the userdata_term callback */
			vchiq_remove_service(service->handle);
			return (status == VCHIQ_RETRY) ?
				-EINTR : -EIO;
		}
	}
	args->handle = service->handle;

	return 0;
}
872 
/*
 * Handle VCHIQ_IOC_DEQUEUE_MESSAGE: pop one message header from the
 * service's msg_queue ring (blocking if requested) and copy its payload
 * to args->buf.
 *
 * Returns the message size on success, -EWOULDBLOCK if non-blocking and
 * empty, -EINTR if the wait is interrupted, -EMSGSIZE if args->bufsize
 * is too small, -EFAULT on copy failure, -ENOTCONN for a NULL header,
 * or -EINVAL for a bad handle / non-vchi service.
 */
static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
				     struct vchiq_dequeue_message *args)
{
	struct user_service *user_service;
	struct vchiq_service *service;
	struct vchiq_header *header;
	int ret;

	DEBUG_INITIALISE(g_state.local)
	DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
	service = find_service_for_instance(instance, args->handle);
	if (!service)
		return -EINVAL;

	/* Only vchi services buffer messages for dequeueing */
	user_service = (struct user_service *)service->base.userdata;
	if (user_service->is_vchi == 0) {
		ret = -EINVAL;
		goto out;
	}

	spin_lock(&msg_queue_spinlock);
	if (user_service->msg_remove == user_service->msg_insert) {
		if (!args->blocking) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
			ret = -EWOULDBLOCK;
			goto out;
		}
		/* Let service_callback know a dequeuer is waiting */
		user_service->dequeue_pending = 1;
		ret = 0;
		/* Wait for a message, dropping the lock around each wait */
		do {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
			if (wait_for_completion_interruptible(
				&user_service->insert_event)) {
				vchiq_log_info(vchiq_arm_log_level,
					"DEQUEUE_MESSAGE interrupted");
				ret = -EINTR;
				break;
			}
			spin_lock(&msg_queue_spinlock);
		} while (user_service->msg_remove ==
			user_service->msg_insert);

		if (ret)
			goto out;
	}

	/* The ring indices are free-running; remove must not pass insert */
	BUG_ON((int)(user_service->msg_insert -
		user_service->msg_remove) < 0);

	header = user_service->msg_queue[user_service->msg_remove &
		(MSG_QUEUE_SIZE - 1)];
	user_service->msg_remove++;
	spin_unlock(&msg_queue_spinlock);

	/* Wake a service_callback blocked on a full ring */
	complete(&user_service->remove_event);
	if (!header) {
		ret = -ENOTCONN;
	} else if (header->size <= args->bufsize) {
		/* Copy to user space if msgbuf is not NULL */
		if (!args->buf || (copy_to_user(args->buf,
					header->data, header->size) == 0)) {
			ret = header->size;
			vchiq_release_message(service->handle, header);
		} else
			ret = -EFAULT;
	} else {
		vchiq_log_error(vchiq_arm_log_level,
			"header %pK: bufsize %x < size %x",
			header, args->bufsize, header->size);
		WARN(1, "invalid size\n");
		ret = -EMSGSIZE;
	}
	DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
out:
	unlock_service(service);
	return ret;
}
952 
/*
 * Handle the QUEUE_BULK_TRANSMIT/QUEUE_BULK_RECEIVE ioctls.
 *
 * BLOCKING mode allocates a fresh bulk_waiter_node; WAITING mode
 * reclaims the waiter parked for this pid by a previously interrupted
 * transfer (-ESRCH if none); other modes pass args->userdata through.
 * If a (BLOCKING/WAITING) transfer is interrupted again with a bulk
 * still outstanding, the waiter is re-parked on the instance and the
 * userspace mode is rewritten to VCHIQ_BULK_MODE_WAITING via put_user()
 * so the caller knows to retry in WAITING mode.
 *
 * Returns 0 on success, -EIO on VCHIQ_ERROR, -EINTR on VCHIQ_RETRY, or
 * a negative errno from the setup steps above.
 */
static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
				      struct vchiq_queue_bulk_transfer *args,
				      enum vchiq_bulk_dir dir,
				      enum vchiq_bulk_mode __user *mode)
{
	struct vchiq_service *service;
	struct bulk_waiter_node *waiter = NULL;
	bool found = false;
	void *userdata;
	int status = 0;
	int ret;

	service = find_service_for_instance(instance, args->handle);
	if (!service)
		return -EINVAL;

	if (args->mode == VCHIQ_BULK_MODE_BLOCKING) {
		waiter = kzalloc(sizeof(struct bulk_waiter_node),
			GFP_KERNEL);
		if (!waiter) {
			ret = -ENOMEM;
			goto out;
		}

		userdata = &waiter->bulk_waiter;
	} else if (args->mode == VCHIQ_BULK_MODE_WAITING) {
		/* Reclaim the waiter parked for this pid */
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_for_each_entry(waiter, &instance->bulk_waiter_list,
				    list) {
			if (waiter->pid == current->pid) {
				list_del(&waiter->list);
				found = true;
				break;
			}
		}
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		if (!found) {
			vchiq_log_error(vchiq_arm_log_level,
				"no bulk_waiter found for pid %d",
				current->pid);
			ret = -ESRCH;
			goto out;
		}
		vchiq_log_info(vchiq_arm_log_level,
			"found bulk_waiter %pK for pid %d", waiter,
			current->pid);
		userdata = &waiter->bulk_waiter;
	} else {
		userdata = args->userdata;
	}

	status = vchiq_bulk_transfer(args->handle, NULL, args->data, args->size,
				     userdata, args->mode, dir);

	/* Non-blocking modes have nothing further to track */
	if (!waiter) {
		ret = 0;
		goto out;
	}

	if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
		!waiter->bulk_waiter.bulk) {
		if (waiter->bulk_waiter.bulk) {
			/* Cancel the signal when the transfer completes. */
			spin_lock(&bulk_waiter_spinlock);
			waiter->bulk_waiter.bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		kfree(waiter);
		ret = 0;
	} else {
		/* Interrupted: park the waiter and tell userspace to retry
		 * in WAITING mode */
		const enum vchiq_bulk_mode mode_waiting =
			VCHIQ_BULK_MODE_WAITING;
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		vchiq_log_info(vchiq_arm_log_level,
			"saved bulk_waiter %pK for pid %d",
			waiter, current->pid);

		ret = put_user(mode_waiting, mode);
	}
out:
	unlock_service(service);
	if (ret)
		return ret;
	else if (status == VCHIQ_ERROR)
		return -EIO;
	else if (status == VCHIQ_RETRY)
		return -EINTR;
	return 0;
}
1045 
1046 /* read a user pointer value from an array pointers in user space */
1047 static inline int vchiq_get_user_ptr(void __user **buf, void __user *ubuf, int index)
1048 {
1049 	int ret;
1050 
1051 	if (in_compat_syscall()) {
1052 		compat_uptr_t ptr32;
1053 		compat_uptr_t __user *uptr = ubuf;
1054 		ret = get_user(ptr32, uptr + index);
1055 		if (ret)
1056 			return ret;
1057 
1058 		*buf = compat_ptr(ptr32);
1059 	} else {
1060 		uintptr_t ptr, __user *uptr = ubuf;
1061 		ret = get_user(ptr, uptr + index);
1062 
1063 		if (ret)
1064 			return ret;
1065 
1066 		*buf = (void __user *)ptr;
1067 	}
1068 
1069 	return 0;
1070 }
1071 
/* 32-bit compat layout of struct vchiq_completion_data (pointers shrunk
 * to compat_uptr_t), written back to userspace by vchiq_put_completion().
 */
struct vchiq_completion_data32 {
	enum vchiq_reason reason;
	compat_uptr_t header;
	compat_uptr_t service_userdata;
	compat_uptr_t bulk_userdata;
};
1078 
1079 static int vchiq_put_completion(struct vchiq_completion_data __user *buf,
1080 				struct vchiq_completion_data *completion,
1081 				int index)
1082 {
1083 	struct vchiq_completion_data32 __user *buf32 = (void __user *)buf;
1084 
1085 	if (in_compat_syscall()) {
1086 		struct vchiq_completion_data32 tmp = {
1087 			.reason		  = completion->reason,
1088 			.header		  = ptr_to_compat(completion->header),
1089 			.service_userdata = ptr_to_compat(completion->service_userdata),
1090 			.bulk_userdata	  = ptr_to_compat(completion->bulk_userdata),
1091 		};
1092 		if (copy_to_user(&buf32[index], &tmp, sizeof(tmp)))
1093 			return -EFAULT;
1094 	} else {
1095 		if (copy_to_user(&buf[index], completion, sizeof(*completion)))
1096 			return -EFAULT;
1097 	}
1098 
1099 	return 0;
1100 }
1101 
/*
 * Handler for VCHIQ_IOC_AWAIT_COMPLETION: block until at least one
 * completion record is available (or the instance is closing), then copy
 * up to args->count records - and any associated message bodies - out to
 * user space.
 *
 * Returns the number of completions delivered, or a negative errno.
 * Note that 'ret' doubles as the loop counter, so an error detected
 * mid-loop is only reported when no completions were copied first.
 */
static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
				      struct vchiq_await_completion *args,
				      int __user *msgbufcountp)
{
	int msgbufcount;
	int remove;
	int ret;

	DEBUG_INITIALISE(g_state.local)

	DEBUG_TRACE(AWAIT_COMPLETION_LINE);
	if (!instance->connected) {
		return -ENOTCONN;
	}

	mutex_lock(&instance->completion_mutex);

	DEBUG_TRACE(AWAIT_COMPLETION_LINE);
	/* Sleep until a completion is inserted or the instance closes. */
	while ((instance->completion_remove ==
		instance->completion_insert)
		&& !instance->closing) {
		int rc;

		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
		/* Drop the mutex while sleeping so producers can post. */
		mutex_unlock(&instance->completion_mutex);
		rc = wait_for_completion_interruptible(
					&instance->insert_event);
		mutex_lock(&instance->completion_mutex);
		if (rc) {
			DEBUG_TRACE(AWAIT_COMPLETION_LINE);
			vchiq_log_info(vchiq_arm_log_level,
				"AWAIT_COMPLETION interrupted");
			ret = -EINTR;
			goto out;
		}
	}
	DEBUG_TRACE(AWAIT_COMPLETION_LINE);

	msgbufcount = args->msgbufcount;
	remove = instance->completion_remove;

	for (ret = 0; ret < args->count; ret++) {
		struct vchiq_completion_data_kernel *completion;
		struct vchiq_completion_data user_completion;
		struct vchiq_service *service;
		struct user_service *user_service;
		struct vchiq_header *header;

		/* Ring empty - deliver what we have so far. */
		if (remove == instance->completion_insert)
			break;

		completion = &instance->completions[
			remove & (MAX_COMPLETIONS - 1)];

		/*
		 * A read memory barrier is needed to stop
		 * prefetch of a stale completion record
		 */
		rmb();

		service = completion->service_userdata;
		user_service = service->base.userdata;

		/* Build the user-visible record; unset fields stay zero. */
		memset(&user_completion, 0, sizeof(user_completion));
		user_completion = (struct vchiq_completion_data) {
			.reason = completion->reason,
			.service_userdata = user_service->userdata,
		};

		header = completion->header;
		if (header) {
			void __user *msgbuf;
			int msglen;

			msglen = header->size + sizeof(struct vchiq_header);
			/* This must be a VCHIQ-style service */
			if (args->msgbufsize < msglen) {
				vchiq_log_error(vchiq_arm_log_level,
					"header %pK: msgbufsize %x < msglen %x",
					header, args->msgbufsize, msglen);
				WARN(1, "invalid message size\n");
				if (ret == 0)
					ret = -EMSGSIZE;
				break;
			}
			if (msgbufcount <= 0)
				/* Stall here for lack of a buffer for the message. */
				break;
			/* Get the pointer from user space */
			msgbufcount--;
			if (vchiq_get_user_ptr(&msgbuf, args->msgbufs,
						msgbufcount)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}

			/* Copy the message to user space */
			if (copy_to_user(msgbuf, header, msglen)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}

			/* Now it has been copied, the message can be released. */
			vchiq_release_message(service->handle, header);

			/* The completion must point to the msgbuf. */
			user_completion.header = msgbuf;
		}

		/*
		 * Without close-delivered support the service reference is
		 * dropped here; otherwise CLOSE_DELIVERED does it later.
		 */
		if ((completion->reason == VCHIQ_SERVICE_CLOSED) &&
		    !instance->use_close_delivered)
			unlock_service(service);

		/*
		 * FIXME: address space mismatch, does bulk_userdata
		 * actually point to user or kernel memory?
		 */
		user_completion.bulk_userdata = completion->bulk_userdata;

		if (vchiq_put_completion(args->buf, &user_completion, ret)) {
			if (ret == 0)
				ret = -EFAULT;
			break;
		}

		/*
		 * Ensure that the above copy has completed
		 * before advancing the remove pointer.
		 */
		mb();
		remove++;
		instance->completion_remove = remove;
	}

	/* Report back how many message buffers were consumed. */
	if (msgbufcount != args->msgbufcount) {
		if (put_user(msgbufcount, msgbufcountp))
			ret = -EFAULT;
	}
out:
	/* Wake anyone blocked on a full completion queue. */
	if (ret)
		complete(&instance->remove_event);
	mutex_unlock(&instance->completion_mutex);
	DEBUG_TRACE(AWAIT_COMPLETION_LINE);

	return ret;
}
1250 
1251 /****************************************************************************
1252  *
1253  *   vchiq_ioctl
1254  *
1255  ***************************************************************************/
/*
 * Native (64-bit ABI) ioctl dispatcher for /dev/vchiq.  'status' carries
 * VCHIQ-level results and is translated to an errno at the end when no
 * errno ('ret') has been set by the handler itself.
 */
static long
vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct vchiq_instance *instance = file->private_data;
	enum vchiq_status status = VCHIQ_SUCCESS;
	struct vchiq_service *service = NULL;
	long ret = 0;
	int i, rc;

	vchiq_log_trace(vchiq_arm_log_level,
		"%s - instance %pK, cmd %s, arg %lx",
		__func__, instance,
		((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
		(_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
		ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);

	switch (cmd) {
	case VCHIQ_IOC_SHUTDOWN:
		if (!instance->connected)
			break;

		/* Remove all services */
		i = 0;
		while ((service = next_service_by_instance(instance->state,
			instance, &i))) {
			status = vchiq_remove_service(service->handle);
			unlock_service(service);
			if (status != VCHIQ_SUCCESS)
				break;
		}
		/* Already unlocked above - avoid the common unlock below. */
		service = NULL;

		if (status == VCHIQ_SUCCESS) {
			/* Wake the completion thread and ask it to exit */
			instance->closing = 1;
			complete(&instance->insert_event);
		}

		break;

	case VCHIQ_IOC_CONNECT:
		if (instance->connected) {
			ret = -EINVAL;
			break;
		}
		rc = mutex_lock_killable(&instance->state->mutex);
		if (rc) {
			vchiq_log_error(vchiq_arm_log_level,
				"vchiq: connect: could not lock mutex for state %d: %d",
				instance->state->id, rc);
			ret = -EINTR;
			break;
		}
		status = vchiq_connect_internal(instance->state, instance);
		mutex_unlock(&instance->state->mutex);

		if (status == VCHIQ_SUCCESS)
			instance->connected = 1;
		else
			vchiq_log_error(vchiq_arm_log_level,
				"vchiq: could not connect: %d", status);
		break;

	case VCHIQ_IOC_CREATE_SERVICE: {
		struct vchiq_create_service __user *argp;
		struct vchiq_create_service args;

		argp = (void __user *)arg;
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_ioc_create_service(instance, &args);
		if (ret < 0)
			break;

		/* Return the new handle; tear down on a failed write-back. */
		if (put_user(args.handle, &argp->handle)) {
			vchiq_remove_service(args.handle);
			ret = -EFAULT;
		}
	} break;

	case VCHIQ_IOC_CLOSE_SERVICE:
	case VCHIQ_IOC_REMOVE_SERVICE: {
		unsigned int handle = (unsigned int)arg;
		struct user_service *user_service;

		service = find_service_for_instance(instance, handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		user_service = service->base.userdata;

		/*
		 * close_pending is false on first entry, and when the
		 * wait in vchiq_close_service has been interrupted.
		 */
		if (!user_service->close_pending) {
			status = (cmd == VCHIQ_IOC_CLOSE_SERVICE) ?
				 vchiq_close_service(service->handle) :
				 vchiq_remove_service(service->handle);
			if (status != VCHIQ_SUCCESS)
				break;
		}

		/*
		 * close_pending is true once the underlying service
		 * has been closed until the client library calls the
		 * CLOSE_DELIVERED ioctl, signalling close_event.
		 */
		if (user_service->close_pending &&
			wait_for_completion_interruptible(
				&user_service->close_event))
			status = VCHIQ_RETRY;
		break;
	}

	case VCHIQ_IOC_USE_SERVICE:
	case VCHIQ_IOC_RELEASE_SERVICE:	{
		unsigned int handle = (unsigned int)arg;

		service = find_service_for_instance(instance, handle);
		if (service) {
			status = (cmd == VCHIQ_IOC_USE_SERVICE)	?
				vchiq_use_service_internal(service) :
				vchiq_release_service_internal(service);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s: cmd %s returned error %d for service %c%c%c%c:%03d",
					__func__,
					(cmd == VCHIQ_IOC_USE_SERVICE) ?
						"VCHIQ_IOC_USE_SERVICE" :
						"VCHIQ_IOC_RELEASE_SERVICE",
					status,
					VCHIQ_FOURCC_AS_4CHARS(
						service->base.fourcc),
					service->client_id);
				ret = -EINVAL;
			}
		} else
			ret = -EINVAL;
	} break;

	case VCHIQ_IOC_QUEUE_MESSAGE: {
		struct vchiq_queue_message args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);

		if (service && (args.count <= MAX_ELEMENTS)) {
			/* Copy elements into kernel space */
			struct vchiq_element elements[MAX_ELEMENTS];

			if (copy_from_user(elements, args.elements,
				args.count * sizeof(struct vchiq_element)) == 0)
				ret = vchiq_ioc_queue_message(args.handle, elements,
							      args.count);
			else
				ret = -EFAULT;
		} else {
			ret = -EINVAL;
		}
	} break;

	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
	case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
		struct vchiq_queue_bulk_transfer args;
		struct vchiq_queue_bulk_transfer __user *argp;

		enum vchiq_bulk_dir dir =
			(cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
			VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;

		argp = (void __user *)arg;
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		/* &argp->mode lets the helper report the effective mode. */
		ret = vchiq_irq_queue_bulk_tx_rx(instance, &args,
						 dir, &argp->mode);
	} break;

	case VCHIQ_IOC_AWAIT_COMPLETION: {
		struct vchiq_await_completion args;
		struct vchiq_await_completion __user *argp;

		argp = (void __user *)arg;
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_ioc_await_completion(instance, &args,
						 &argp->msgbufcount);
	} break;

	case VCHIQ_IOC_DEQUEUE_MESSAGE: {
		struct vchiq_dequeue_message args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_ioc_dequeue_message(instance, &args);
	} break;

	case VCHIQ_IOC_GET_CLIENT_ID: {
		unsigned int handle = (unsigned int)arg;

		ret = vchiq_get_client_id(handle);
	} break;

	case VCHIQ_IOC_GET_CONFIG: {
		struct vchiq_get_config args;
		struct vchiq_config config;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}
		/* The caller may request a prefix of the config struct. */
		if (args.config_size > sizeof(config)) {
			ret = -EINVAL;
			break;
		}

		vchiq_get_config(&config);
		if (copy_to_user(args.pconfig, &config, args.config_size)) {
			ret = -EFAULT;
			break;
		}
	} break;

	case VCHIQ_IOC_SET_SERVICE_OPTION: {
		struct vchiq_set_service_option args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		status = vchiq_set_service_option(
				args.handle, args.option, args.value);
	} break;

	case VCHIQ_IOC_LIB_VERSION: {
		unsigned int lib_version = (unsigned int)arg;

		/* Newer client libraries opt in to close-delivered. */
		if (lib_version < VCHIQ_VERSION_MIN)
			ret = -EINVAL;
		else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
			instance->use_close_delivered = 1;
	} break;

	case VCHIQ_IOC_CLOSE_DELIVERED: {
		unsigned int handle = (unsigned int)arg;

		service = find_closed_service_for_instance(instance, handle);
		if (service) {
			struct user_service *user_service =
				(struct user_service *)service->base.userdata;
			close_delivered(user_service);
		} else
			ret = -EINVAL;
	} break;

	default:
		ret = -ENOTTY;
		break;
	}

	/* Drop the reference taken by find_service_for_instance(), if any. */
	if (service)
		unlock_service(service);

	/* Map the VCHIQ status onto an errno unless one is already set. */
	if (ret == 0) {
		if (status == VCHIQ_ERROR)
			ret = -EIO;
		else if (status == VCHIQ_RETRY)
			ret = -EINTR;
	}

	if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
		(ret != -EWOULDBLOCK))
		vchiq_log_info(vchiq_arm_log_level,
			"  ioctl instance %pK, cmd %s -> status %d, %ld",
			instance,
			(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
				ioctl_names[_IOC_NR(cmd)] :
				"<invalid>",
			status, ret);
	else
		vchiq_log_trace(vchiq_arm_log_level,
			"  ioctl instance %pK, cmd %s -> status %d, %ld",
			instance,
			(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
				ioctl_names[_IOC_NR(cmd)] :
				"<invalid>",
			status, ret);

	return ret;
}
1575 
1576 #if defined(CONFIG_COMPAT)
1577 
/*
 * 32-bit ABI mirror of struct vchiq_service_params; pointer fields are
 * carried as compat_uptr_t.
 */
struct vchiq_service_params32 {
	int fourcc;
	compat_uptr_t callback;
	compat_uptr_t userdata;
	short version; /* Increment for non-trivial changes */
	short version_min; /* Update for incompatible changes */
};

/* 32-bit ABI mirror of struct vchiq_create_service. */
struct vchiq_create_service32 {
	struct vchiq_service_params32 params;
	int is_open;
	int is_vchi;
	unsigned int handle; /* OUT */
};

/* Compat ioctl number matching VCHIQ_IOC_CREATE_SERVICE (nr 2). */
#define VCHIQ_IOC_CREATE_SERVICE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service32)
1595 
1596 static long
1597 vchiq_compat_ioctl_create_service(
1598 	struct file *file,
1599 	unsigned int cmd,
1600 	struct vchiq_create_service32 __user *ptrargs32)
1601 {
1602 	struct vchiq_create_service args;
1603 	struct vchiq_create_service32 args32;
1604 	long ret;
1605 
1606 	if (copy_from_user(&args32, ptrargs32, sizeof(args32)))
1607 		return -EFAULT;
1608 
1609 	args = (struct vchiq_create_service) {
1610 		.params = {
1611 			.fourcc	     = args32.params.fourcc,
1612 			.callback    = compat_ptr(args32.params.callback),
1613 			.userdata    = compat_ptr(args32.params.userdata),
1614 			.version     = args32.params.version,
1615 			.version_min = args32.params.version_min,
1616 		},
1617 		.is_open = args32.is_open,
1618 		.is_vchi = args32.is_vchi,
1619 		.handle  = args32.handle,
1620 	};
1621 
1622 	ret = vchiq_ioc_create_service(file->private_data, &args);
1623 	if (ret < 0)
1624 		return ret;
1625 
1626 	if (put_user(args.handle, &ptrargs32->handle)) {
1627 		vchiq_remove_service(args.handle);
1628 		return -EFAULT;
1629 	}
1630 
1631 	return 0;
1632 }
1633 
/* 32-bit ABI mirror of struct vchiq_element. */
struct vchiq_element32 {
	compat_uptr_t data;
	unsigned int size;
};

/* 32-bit ABI mirror of struct vchiq_queue_message. */
struct vchiq_queue_message32 {
	unsigned int handle;
	unsigned int count;
	compat_uptr_t elements;
};

/* Compat ioctl number matching VCHIQ_IOC_QUEUE_MESSAGE (nr 4). */
#define VCHIQ_IOC_QUEUE_MESSAGE32 \
	_IOW(VCHIQ_IOC_MAGIC,  4, struct vchiq_queue_message32)
1647 
1648 static long
1649 vchiq_compat_ioctl_queue_message(struct file *file,
1650 				 unsigned int cmd,
1651 				 struct vchiq_queue_message32 __user *arg)
1652 {
1653 	struct vchiq_queue_message args;
1654 	struct vchiq_queue_message32 args32;
1655 	struct vchiq_service *service;
1656 	int ret;
1657 
1658 	if (copy_from_user(&args32, arg, sizeof(args32)))
1659 		return -EFAULT;
1660 
1661 	args = (struct vchiq_queue_message) {
1662 		.handle   = args32.handle,
1663 		.count    = args32.count,
1664 		.elements = compat_ptr(args32.elements),
1665 	};
1666 
1667 	if (args32.count > MAX_ELEMENTS)
1668 		return -EINVAL;
1669 
1670 	service = find_service_for_instance(file->private_data, args.handle);
1671 	if (!service)
1672 		return -EINVAL;
1673 
1674 	if (args32.elements && args32.count) {
1675 		struct vchiq_element32 element32[MAX_ELEMENTS];
1676 		struct vchiq_element elements[MAX_ELEMENTS];
1677 		unsigned int count;
1678 
1679 		if (copy_from_user(&element32, args.elements,
1680 				   sizeof(element32))) {
1681 			unlock_service(service);
1682 			return -EFAULT;
1683 		}
1684 
1685 		for (count = 0; count < args32.count; count++) {
1686 			elements[count].data =
1687 				compat_ptr(element32[count].data);
1688 			elements[count].size = element32[count].size;
1689 		}
1690 		ret = vchiq_ioc_queue_message(args.handle, elements,
1691 					      args.count);
1692 	} else {
1693 		ret = -EINVAL;
1694 	}
1695 	unlock_service(service);
1696 
1697 	return ret;
1698 }
1699 
/* 32-bit ABI mirror of struct vchiq_queue_bulk_transfer. */
struct vchiq_queue_bulk_transfer32 {
	unsigned int handle;
	compat_uptr_t data;
	unsigned int size;
	compat_uptr_t userdata;
	enum vchiq_bulk_mode mode;
};

/* Compat ioctl numbers matching the native bulk ioctls (nr 5 and 6). */
#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT32 \
	_IOWR(VCHIQ_IOC_MAGIC, 5, struct vchiq_queue_bulk_transfer32)
#define VCHIQ_IOC_QUEUE_BULK_RECEIVE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 6, struct vchiq_queue_bulk_transfer32)
1712 
1713 static long
1714 vchiq_compat_ioctl_queue_bulk(struct file *file,
1715 			      unsigned int cmd,
1716 			      struct vchiq_queue_bulk_transfer32 __user *argp)
1717 {
1718 	struct vchiq_queue_bulk_transfer32 args32;
1719 	struct vchiq_queue_bulk_transfer args;
1720 	enum vchiq_bulk_dir dir = (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT32) ?
1721 				  VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
1722 
1723 	if (copy_from_user(&args32, argp, sizeof(args32)))
1724 		return -EFAULT;
1725 
1726 	args = (struct vchiq_queue_bulk_transfer) {
1727 		.handle   = args32.handle,
1728 		.data	  = compat_ptr(args32.data),
1729 		.size	  = args32.size,
1730 		.userdata = compat_ptr(args32.userdata),
1731 		.mode	  = args32.mode,
1732 	};
1733 
1734 	return vchiq_irq_queue_bulk_tx_rx(file->private_data, &args,
1735 					  dir, &argp->mode);
1736 }
1737 
/* 32-bit ABI mirror of struct vchiq_await_completion. */
struct vchiq_await_completion32 {
	unsigned int count;
	compat_uptr_t buf;
	unsigned int msgbufsize;
	unsigned int msgbufcount; /* IN/OUT */
	compat_uptr_t msgbufs;
};

/* Compat ioctl number matching VCHIQ_IOC_AWAIT_COMPLETION (nr 7). */
#define VCHIQ_IOC_AWAIT_COMPLETION32 \
	_IOWR(VCHIQ_IOC_MAGIC, 7, struct vchiq_await_completion32)
1748 
1749 static long
1750 vchiq_compat_ioctl_await_completion(struct file *file,
1751 				    unsigned int cmd,
1752 				    struct vchiq_await_completion32 __user *argp)
1753 {
1754 	struct vchiq_await_completion args;
1755 	struct vchiq_await_completion32 args32;
1756 
1757 	if (copy_from_user(&args32, argp, sizeof(args32)))
1758 		return -EFAULT;
1759 
1760 	args = (struct vchiq_await_completion) {
1761 		.count		= args32.count,
1762 		.buf		= compat_ptr(args32.buf),
1763 		.msgbufsize	= args32.msgbufsize,
1764 		.msgbufcount	= args32.msgbufcount,
1765 		.msgbufs	= compat_ptr(args32.msgbufs),
1766 	};
1767 
1768 	return vchiq_ioc_await_completion(file->private_data, &args,
1769 					  &argp->msgbufcount);
1770 }
1771 
/* 32-bit ABI mirror of struct vchiq_dequeue_message. */
struct vchiq_dequeue_message32 {
	unsigned int handle;
	int blocking;
	unsigned int bufsize;
	compat_uptr_t buf;
};

/* Compat ioctl number matching VCHIQ_IOC_DEQUEUE_MESSAGE (nr 8). */
#define VCHIQ_IOC_DEQUEUE_MESSAGE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 8, struct vchiq_dequeue_message32)
1781 
1782 static long
1783 vchiq_compat_ioctl_dequeue_message(struct file *file,
1784 				   unsigned int cmd,
1785 				   struct vchiq_dequeue_message32 __user *arg)
1786 {
1787 	struct vchiq_dequeue_message32 args32;
1788 	struct vchiq_dequeue_message args;
1789 
1790 	if (copy_from_user(&args32, arg, sizeof(args32)))
1791 		return -EFAULT;
1792 
1793 	args = (struct vchiq_dequeue_message) {
1794 		.handle		= args32.handle,
1795 		.blocking	= args32.blocking,
1796 		.bufsize	= args32.bufsize,
1797 		.buf		= compat_ptr(args32.buf),
1798 	};
1799 
1800 	return vchiq_ioc_dequeue_message(file->private_data, &args);
1801 }
1802 
/* 32-bit ABI mirror of struct vchiq_get_config. */
struct vchiq_get_config32 {
	unsigned int config_size;
	compat_uptr_t pconfig;
};

/* Compat ioctl number matching VCHIQ_IOC_GET_CONFIG (nr 10). */
#define VCHIQ_IOC_GET_CONFIG32 \
	_IOWR(VCHIQ_IOC_MAGIC, 10, struct vchiq_get_config32)
1810 
1811 static long
1812 vchiq_compat_ioctl_get_config(struct file *file,
1813 			      unsigned int cmd,
1814 			      struct vchiq_get_config32 __user *arg)
1815 {
1816 	struct vchiq_get_config32 args32;
1817 	struct vchiq_config config;
1818 	void __user *ptr;
1819 
1820 	if (copy_from_user(&args32, arg, sizeof(args32)))
1821 		return -EFAULT;
1822 	if (args32.config_size > sizeof(config))
1823 		return -EINVAL;
1824 
1825 	vchiq_get_config(&config);
1826 	ptr = compat_ptr(args32.pconfig);
1827 	if (copy_to_user(ptr, &config, args32.config_size))
1828 		return -EFAULT;
1829 
1830 	return 0;
1831 }
1832 
1833 static long
1834 vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1835 {
1836 	void __user *argp = compat_ptr(arg);
1837 	switch (cmd) {
1838 	case VCHIQ_IOC_CREATE_SERVICE32:
1839 		return vchiq_compat_ioctl_create_service(file, cmd, argp);
1840 	case VCHIQ_IOC_QUEUE_MESSAGE32:
1841 		return vchiq_compat_ioctl_queue_message(file, cmd, argp);
1842 	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT32:
1843 	case VCHIQ_IOC_QUEUE_BULK_RECEIVE32:
1844 		return vchiq_compat_ioctl_queue_bulk(file, cmd, argp);
1845 	case VCHIQ_IOC_AWAIT_COMPLETION32:
1846 		return vchiq_compat_ioctl_await_completion(file, cmd, argp);
1847 	case VCHIQ_IOC_DEQUEUE_MESSAGE32:
1848 		return vchiq_compat_ioctl_dequeue_message(file, cmd, argp);
1849 	case VCHIQ_IOC_GET_CONFIG32:
1850 		return vchiq_compat_ioctl_get_config(file, cmd, argp);
1851 	default:
1852 		return vchiq_ioctl(file, cmd, (unsigned long)argp);
1853 	}
1854 }
1855 
1856 #endif
1857 
/*
 * Open handler for /dev/vchiq: allocate and initialise a per-fd
 * vchiq_instance.  Fails with -ENOTCONN if the VideoCore link is not
 * yet (or no longer) established.
 */
static int vchiq_open(struct inode *inode, struct file *file)
{
	struct vchiq_state *state = vchiq_get_state();
	struct vchiq_instance *instance;

	vchiq_log_info(vchiq_arm_log_level, "vchiq_open");

	if (!state) {
		vchiq_log_error(vchiq_arm_log_level,
				"vchiq has no connection to VideoCore");
		return -ENOTCONN;
	}

	/* kzalloc leaves counters, flags and lists zeroed. */
	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance)
		return -ENOMEM;

	instance->state = state;
	instance->pid = current->tgid;

	vchiq_debugfs_add_instance(instance);

	init_completion(&instance->insert_event);
	init_completion(&instance->remove_event);
	mutex_init(&instance->completion_mutex);
	mutex_init(&instance->bulk_waiter_list_mutex);
	INIT_LIST_HEAD(&instance->bulk_waiter_list);

	file->private_data = instance;

	return 0;
}
1890 
/*
 * Release handler for /dev/vchiq: stop the completion consumer, tear
 * down every service owned by this instance, drain queued messages and
 * completions, and free the instance.  The ordering below is
 * deliberate: services must be terminated and reaped before their
 * queued messages and completions are released.
 */
static int vchiq_release(struct inode *inode, struct file *file)
{
	struct vchiq_instance *instance = file->private_data;
	struct vchiq_state *state = vchiq_get_state();
	struct vchiq_service *service;
	int ret = 0;
	int i;

	vchiq_log_info(vchiq_arm_log_level, "%s: instance=%lx", __func__,
		       (unsigned long)instance);

	if (!state) {
		ret = -EPERM;
		goto out;
	}

	/* Ensure videocore is awake to allow termination. */
	vchiq_use_internal(instance->state, NULL, USE_TYPE_VCHIQ);

	mutex_lock(&instance->completion_mutex);

	/* Wake the completion thread and ask it to exit */
	instance->closing = 1;
	complete(&instance->insert_event);

	mutex_unlock(&instance->completion_mutex);

	/* Wake the slot handler if the completion queue is full. */
	complete(&instance->remove_event);

	/* Mark all services for termination... */
	i = 0;
	while ((service = next_service_by_instance(state, instance, &i))) {
		struct user_service *user_service = service->base.userdata;

		/* Wake the slot handler if the msg queue is full. */
		complete(&user_service->remove_event);

		vchiq_terminate_service_internal(service);
		unlock_service(service);
	}

	/* ...and wait for them to die */
	i = 0;
	while ((service = next_service_by_instance(state, instance, &i))) {
		struct user_service *user_service = service->base.userdata;

		wait_for_completion(&service->remove_event);

		BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);

		spin_lock(&msg_queue_spinlock);

		/* Release any messages still queued for user space. */
		while (user_service->msg_remove != user_service->msg_insert) {
			struct vchiq_header *header;
			int m = user_service->msg_remove & (MSG_QUEUE_SIZE - 1);

			header = user_service->msg_queue[m];
			user_service->msg_remove++;
			/* Drop the lock while releasing; it may sleep. */
			spin_unlock(&msg_queue_spinlock);

			if (header)
				vchiq_release_message(service->handle, header);
			spin_lock(&msg_queue_spinlock);
		}

		spin_unlock(&msg_queue_spinlock);

		unlock_service(service);
	}

	/* Release any closed services */
	while (instance->completion_remove !=
		instance->completion_insert) {
		struct vchiq_completion_data_kernel *completion;
		struct vchiq_service *service;

		completion = &instance->completions[
			instance->completion_remove & (MAX_COMPLETIONS - 1)];
		service = completion->service_userdata;
		if (completion->reason == VCHIQ_SERVICE_CLOSED) {
			struct user_service *user_service =
							service->base.userdata;

			/* Wake any blocked user-thread */
			if (instance->use_close_delivered)
				complete(&user_service->close_event);
			unlock_service(service);
		}
		instance->completion_remove++;
	}

	/* Release the PEER service count. */
	vchiq_release_internal(instance->state, NULL);

	/* Free any bulk waiters left behind by interrupted transfers. */
	{
		struct bulk_waiter_node *waiter, *next;

		list_for_each_entry_safe(waiter, next,
					 &instance->bulk_waiter_list, list) {
			list_del(&waiter->list);
			vchiq_log_info(vchiq_arm_log_level,
				"bulk_waiter - cleaned up %pK for pid %d",
				waiter, waiter->pid);
			kfree(waiter);
		}
	}

	vchiq_debugfs_remove_instance(instance);

	kfree(instance);
	file->private_data = NULL;

out:
	return ret;
}
2007 
2008 /****************************************************************************
2009  *
2010  *   vchiq_dump
2011  *
2012  ***************************************************************************/
2013 
/*
 * Append @len bytes of @str to the user buffer described by
 * @dump_context, honouring the caller's read offset and the remaining
 * buffer space.
 *
 * Returns 0 on success (including when output is skipped or truncated)
 * or -EFAULT if a copy to user space fails.
 */
int vchiq_dump(void *dump_context, const char *str, int len)
{
	struct dump_context *context = (struct dump_context *)dump_context;
	int copy_bytes;

	/* The user buffer is already full - nothing more can be emitted. */
	if (context->actual >= context->space)
		return 0;

	/* Consume any still-unsatisfied seek offset before copying. */
	if (context->offset > 0) {
		int skip_bytes = min_t(int, len, context->offset);

		str += skip_bytes;
		len -= skip_bytes;
		context->offset -= skip_bytes;
		if (context->offset > 0)
			return 0;
	}
	/* Clamp to the space left in the user buffer. */
	copy_bytes = min_t(int, len, context->space - context->actual);
	if (copy_bytes == 0)
		return 0;
	if (copy_to_user(context->buf + context->actual, str,
			 copy_bytes))
		return -EFAULT;
	context->actual += copy_bytes;
	len -= copy_bytes;

	/*
	 * If the terminating NUL is included in the length, then it
	 * marks the end of a line and should be replaced with a
	 * carriage return.
	 */
	if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
		char cr = '\n';

		if (copy_to_user(context->buf + context->actual - 1,
				 &cr, 1))
			return -EFAULT;
	}
	return 0;
}
2054 
2055 /****************************************************************************
2056  *
2057  *   vchiq_dump_platform_instance_state
2058  *
2059  ***************************************************************************/
2060 
/*
 * Dump a one-line summary of every user-space instance.  There is no
 * list of instances, so instead scan all services, using the per-
 * instance 'mark' flag to dump each instance exactly once.
 *
 * Returns 0 on success or the error from vchiq_dump().
 */
int vchiq_dump_platform_instances(void *dump_context)
{
	struct vchiq_state *state = vchiq_get_state();
	char buf[80];
	int len;
	int i;

	/*
	 * There is no list of instances, so instead scan all services,
	 * marking those that have been dumped.
	 */

	/* Pass 1: clear the mark on every instance reachable here. */
	rcu_read_lock();
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;

		service = rcu_dereference(state->services[i]);
		/* Only services created via this file interface count. */
		if (!service || service->base.callback != service_callback)
			continue;

		instance = service->instance;
		if (instance)
			instance->mark = 0;
	}
	rcu_read_unlock();

	/* Pass 2: dump each unmarked instance once, then mark it. */
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;
		int err;

		rcu_read_lock();
		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback) {
			rcu_read_unlock();
			continue;
		}

		instance = service->instance;
		if (!instance || instance->mark) {
			rcu_read_unlock();
			continue;
		}
		/*
		 * NOTE(review): 'instance' is dereferenced below after
		 * rcu_read_unlock(); presumably the instance outlives its
		 * services here - confirm against the release path.
		 */
		rcu_read_unlock();

		len = snprintf(buf, sizeof(buf),
			       "Instance %pK: pid %d,%s completions %d/%d",
			       instance, instance->pid,
			       instance->connected ? " connected, " :
			       "",
			       instance->completion_insert -
			       instance->completion_remove,
			       MAX_COMPLETIONS);
		err = vchiq_dump(dump_context, buf, len + 1);
		if (err)
			return err;
		instance->mark = 1;
	}
	return 0;
}
2122 
2123 /****************************************************************************
2124  *
2125  *   vchiq_dump_platform_service_state
2126  *
2127  ***************************************************************************/
2128 
2129 int vchiq_dump_platform_service_state(void *dump_context,
2130 				      struct vchiq_service *service)
2131 {
2132 	struct user_service *user_service =
2133 			(struct user_service *)service->base.userdata;
2134 	char buf[80];
2135 	int len;
2136 
2137 	len = scnprintf(buf, sizeof(buf), "  instance %pK", service->instance);
2138 
2139 	if ((service->base.callback == service_callback) &&
2140 		user_service->is_vchi) {
2141 		len += scnprintf(buf + len, sizeof(buf) - len,
2142 			", %d/%d messages",
2143 			user_service->msg_insert - user_service->msg_remove,
2144 			MSG_QUEUE_SIZE);
2145 
2146 		if (user_service->dequeue_pending)
2147 			len += scnprintf(buf + len, sizeof(buf) - len,
2148 				" (dequeue pending)");
2149 	}
2150 
2151 	return vchiq_dump(dump_context, buf, len + 1);
2152 }
2153 
2154 /****************************************************************************
2155  *
2156  *   vchiq_read
2157  *
2158  ***************************************************************************/
2159 
2160 static ssize_t
2161 vchiq_read(struct file *file, char __user *buf,
2162 	size_t count, loff_t *ppos)
2163 {
2164 	struct dump_context context;
2165 	int err;
2166 
2167 	context.buf = buf;
2168 	context.actual = 0;
2169 	context.space = count;
2170 	context.offset = *ppos;
2171 
2172 	err = vchiq_dump_state(&context, &g_state);
2173 	if (err)
2174 		return err;
2175 
2176 	*ppos += context.actual;
2177 
2178 	return context.actual;
2179 }
2180 
2181 struct vchiq_state *
2182 vchiq_get_state(void)
2183 {
2184 
2185 	if (!g_state.remote)
2186 		printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
2187 	else if (g_state.remote->initialised != 1)
2188 		printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
2189 			__func__, g_state.remote->initialised);
2190 
2191 	return (g_state.remote &&
2192 		(g_state.remote->initialised == 1)) ? &g_state : NULL;
2193 }
2194 
/* File operations for the /dev/vchiq character device. */
static const struct file_operations
vchiq_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = vchiq_ioctl,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vchiq_compat_ioctl,
#endif
	.open = vchiq_open,
	.release = vchiq_release,
	.read = vchiq_read
};
2206 
2207 /*
2208  * Autosuspend related functionality
2209  */
2210 
2211 static enum vchiq_status
2212 vchiq_keepalive_vchiq_callback(enum vchiq_reason reason,
2213 	struct vchiq_header *header,
2214 	unsigned int service_user,
2215 	void *bulk_user)
2216 {
2217 	vchiq_log_error(vchiq_susp_log_level,
2218 		"%s callback reason %d", __func__, reason);
2219 	return 0;
2220 }
2221 
/*
 * Keep-alive worker thread.  Opens a "KEEP" service to the VideoCore and
 * then converts remote use/release notifications (counted by
 * vchiq_on_remote_use()/vchiq_on_remote_release() and signalled through
 * arm_state->ka_evt) into vchiq_use_service()/vchiq_release_service()
 * calls.  The main loop never exits in normal operation; only setup
 * failures terminate the thread.
 */
static int
vchiq_keepalive_thread_func(void *v)
{
	struct vchiq_state *state = (struct vchiq_state *)v;
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	enum vchiq_status status;
	struct vchiq_instance *instance;
	unsigned int ka_handle;

	/* Dedicated keep-alive service; its callback only logs events. */
	struct vchiq_service_params_kernel params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
		.callback    = vchiq_keepalive_vchiq_callback,
		.version     = KEEPALIVE_VER,
		.version_min = KEEPALIVE_VER_MIN
	};

	status = vchiq_initialise(&instance);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_initialise failed %d", __func__, status);
		goto exit;
	}

	status = vchiq_connect(instance);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_connect failed %d", __func__, status);
		goto shutdown;
	}

	status = vchiq_add_service(instance, &params, &ka_handle);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_open_service failed %d", __func__, status);
		goto shutdown;
	}

	while (1) {
		long rc = 0, uc = 0;

		/* Block until vchiq_on_remote_use()/release() signal work. */
		if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
			vchiq_log_error(vchiq_susp_log_level,
				"%s interrupted", __func__);
			flush_signals(current);
			continue;
		}

		/*
		 * read and clear counters.  Do release_count then use_count to
		 * prevent getting more releases than uses
		 */
		rc = atomic_xchg(&arm_state->ka_release_count, 0);
		uc = atomic_xchg(&arm_state->ka_use_count, 0);

		/*
		 * Call use/release service the requisite number of times.
		 * Process use before release so use counts don't go negative
		 */
		while (uc--) {
			/* Acked uses are flushed later by vchiq_use_internal(). */
			atomic_inc(&arm_state->ka_use_ack_count);
			status = vchiq_use_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s vchiq_use_service error %d",
					__func__, status);
			}
		}
		while (rc--) {
			status = vchiq_release_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s vchiq_release_service error %d",
					__func__, status);
			}
		}
	}

shutdown:
	vchiq_shutdown(instance);
exit:
	return 0;
}
2305 
2306 enum vchiq_status
2307 vchiq_arm_init_state(struct vchiq_state *state,
2308 		     struct vchiq_arm_state *arm_state)
2309 {
2310 	if (arm_state) {
2311 		rwlock_init(&arm_state->susp_res_lock);
2312 
2313 		init_completion(&arm_state->ka_evt);
2314 		atomic_set(&arm_state->ka_use_count, 0);
2315 		atomic_set(&arm_state->ka_use_ack_count, 0);
2316 		atomic_set(&arm_state->ka_release_count, 0);
2317 
2318 		arm_state->state = state;
2319 		arm_state->first_connect = 0;
2320 
2321 	}
2322 	return VCHIQ_SUCCESS;
2323 }
2324 
2325 enum vchiq_status
2326 vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
2327 		   enum USE_TYPE_E use_type)
2328 {
2329 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2330 	enum vchiq_status ret = VCHIQ_SUCCESS;
2331 	char entity[16];
2332 	int *entity_uc;
2333 	int local_uc;
2334 
2335 	if (!arm_state)
2336 		goto out;
2337 
2338 	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2339 
2340 	if (use_type == USE_TYPE_VCHIQ) {
2341 		sprintf(entity, "VCHIQ:   ");
2342 		entity_uc = &arm_state->peer_use_count;
2343 	} else if (service) {
2344 		sprintf(entity, "%c%c%c%c:%03d",
2345 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2346 			service->client_id);
2347 		entity_uc = &service->service_use_count;
2348 	} else {
2349 		vchiq_log_error(vchiq_susp_log_level, "%s null service ptr", __func__);
2350 		ret = VCHIQ_ERROR;
2351 		goto out;
2352 	}
2353 
2354 	write_lock_bh(&arm_state->susp_res_lock);
2355 	local_uc = ++arm_state->videocore_use_count;
2356 	++(*entity_uc);
2357 
2358 	vchiq_log_trace(vchiq_susp_log_level,
2359 		"%s %s count %d, state count %d",
2360 		__func__, entity, *entity_uc, local_uc);
2361 
2362 	write_unlock_bh(&arm_state->susp_res_lock);
2363 
2364 	if (ret == VCHIQ_SUCCESS) {
2365 		enum vchiq_status status = VCHIQ_SUCCESS;
2366 		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
2367 
2368 		while (ack_cnt && (status == VCHIQ_SUCCESS)) {
2369 			/* Send the use notify to videocore */
2370 			status = vchiq_send_remote_use_active(state);
2371 			if (status == VCHIQ_SUCCESS)
2372 				ack_cnt--;
2373 			else
2374 				atomic_add(ack_cnt,
2375 					&arm_state->ka_use_ack_count);
2376 		}
2377 	}
2378 
2379 out:
2380 	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2381 	return ret;
2382 }
2383 
2384 enum vchiq_status
2385 vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
2386 {
2387 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2388 	enum vchiq_status ret = VCHIQ_SUCCESS;
2389 	char entity[16];
2390 	int *entity_uc;
2391 
2392 	if (!arm_state)
2393 		goto out;
2394 
2395 	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2396 
2397 	if (service) {
2398 		sprintf(entity, "%c%c%c%c:%03d",
2399 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2400 			service->client_id);
2401 		entity_uc = &service->service_use_count;
2402 	} else {
2403 		sprintf(entity, "PEER:   ");
2404 		entity_uc = &arm_state->peer_use_count;
2405 	}
2406 
2407 	write_lock_bh(&arm_state->susp_res_lock);
2408 	if (!arm_state->videocore_use_count || !(*entity_uc)) {
2409 		/* Don't use BUG_ON - don't allow user thread to crash kernel */
2410 		WARN_ON(!arm_state->videocore_use_count);
2411 		WARN_ON(!(*entity_uc));
2412 		ret = VCHIQ_ERROR;
2413 		goto unlock;
2414 	}
2415 	--arm_state->videocore_use_count;
2416 	--(*entity_uc);
2417 
2418 	vchiq_log_trace(vchiq_susp_log_level,
2419 		"%s %s count %d, state count %d",
2420 		__func__, entity, *entity_uc,
2421 		arm_state->videocore_use_count);
2422 
2423 unlock:
2424 	write_unlock_bh(&arm_state->susp_res_lock);
2425 
2426 out:
2427 	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2428 	return ret;
2429 }
2430 
/*
 * Remote "use" notification from the VideoCore: record it and wake the
 * keep-alive thread, which converts it into a vchiq_use_service() call.
 */
void
vchiq_on_remote_use(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
	atomic_inc(&arm_state->ka_use_count);
	complete(&arm_state->ka_evt);
}
2440 
/*
 * Remote "release" notification from the VideoCore: record it and wake
 * the keep-alive thread, which converts it into vchiq_release_service().
 */
void
vchiq_on_remote_release(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
	atomic_inc(&arm_state->ka_release_count);
	complete(&arm_state->ka_evt);
}
2450 
/* Convenience wrapper: take a per-service use count on @service. */
enum vchiq_status
vchiq_use_service_internal(struct vchiq_service *service)
{
	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}
2456 
/* Convenience wrapper: drop a per-service use count on @service. */
enum vchiq_status
vchiq_release_service_internal(struct vchiq_service *service)
{
	return vchiq_release_internal(service->state, service);
}
2462 
/* Return the debugfs node embedded in @instance. */
struct vchiq_debugfs_node *
vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
{
	return &instance->debugfs_node;
}
2468 
2469 int
2470 vchiq_instance_get_use_count(struct vchiq_instance *instance)
2471 {
2472 	struct vchiq_service *service;
2473 	int use_count = 0, i;
2474 
2475 	i = 0;
2476 	rcu_read_lock();
2477 	while ((service = __next_service_by_instance(instance->state,
2478 						     instance, &i)))
2479 		use_count += service->service_use_count;
2480 	rcu_read_unlock();
2481 	return use_count;
2482 }
2483 
/* Return the pid recorded for this instance. */
int
vchiq_instance_get_pid(struct vchiq_instance *instance)
{
	return instance->pid;
}
2489 
/* Return the instance-wide trace flag (0 or 1). */
int
vchiq_instance_get_trace(struct vchiq_instance *instance)
{
	return instance->trace;
}
2495 
2496 void
2497 vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
2498 {
2499 	struct vchiq_service *service;
2500 	int i;
2501 
2502 	i = 0;
2503 	rcu_read_lock();
2504 	while ((service = __next_service_by_instance(instance->state,
2505 						     instance, &i)))
2506 		service->trace = trace;
2507 	rcu_read_unlock();
2508 	instance->trace = (trace != 0);
2509 }
2510 
2511 enum vchiq_status
2512 vchiq_use_service(unsigned int handle)
2513 {
2514 	enum vchiq_status ret = VCHIQ_ERROR;
2515 	struct vchiq_service *service = find_service_by_handle(handle);
2516 
2517 	if (service) {
2518 		ret = vchiq_use_internal(service->state, service,
2519 				USE_TYPE_SERVICE);
2520 		unlock_service(service);
2521 	}
2522 	return ret;
2523 }
2524 EXPORT_SYMBOL(vchiq_use_service);
2525 
2526 enum vchiq_status
2527 vchiq_release_service(unsigned int handle)
2528 {
2529 	enum vchiq_status ret = VCHIQ_ERROR;
2530 	struct vchiq_service *service = find_service_by_handle(handle);
2531 
2532 	if (service) {
2533 		ret = vchiq_release_internal(service->state, service);
2534 		unlock_service(service);
2535 	}
2536 	return ret;
2537 }
2538 EXPORT_SYMBOL(vchiq_release_service);
2539 
/* Snapshot of one service's identity and use count, taken for dumping. */
struct service_data_struct {
	int fourcc;
	int clientid;
	int use_count;
};
2545 
/*
 * Log the use counts of all active services plus the aggregate peer and
 * videocore counts.  Data is snapshotted under susp_res_lock + RCU first,
 * then logged outside the locks so logging cannot stall lock holders.
 */
void
vchiq_dump_service_use_state(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct service_data_struct *service_data;
	int i, found = 0;
	/*
	 * If there's more than 64 services, only dump ones with
	 * non-zero counts
	 */
	int only_nonzero = 0;
	static const char *nz = "<-- preventing suspend";

	int peer_count;
	int vc_use_count;
	int active_services;

	if (!arm_state)
		return;

	service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
				     GFP_KERNEL);
	if (!service_data)
		return;

	read_lock_bh(&arm_state->susp_res_lock);
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > MAX_SERVICES)
		only_nonzero = 1;

	/* Copy fourcc/client/use-count under RCU; log after unlocking. */
	rcu_read_lock();
	for (i = 0; i < active_services; i++) {
		struct vchiq_service *service_ptr =
			rcu_dereference(state->services[i]);

		if (!service_ptr)
			continue;

		if (only_nonzero && !service_ptr->service_use_count)
			continue;

		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
			continue;

		service_data[found].fourcc = service_ptr->base.fourcc;
		service_data[found].clientid = service_ptr->client_id;
		service_data[found].use_count = service_ptr->service_use_count;
		found++;
		if (found >= MAX_SERVICES)
			break;
	}
	rcu_read_unlock();

	read_unlock_bh(&arm_state->susp_res_lock);

	if (only_nonzero)
		vchiq_log_warning(vchiq_susp_log_level, "Too many active "
			"services (%d).  Only dumping up to first %d services "
			"with non-zero use-count", active_services, found);

	for (i = 0; i < found; i++) {
		vchiq_log_warning(vchiq_susp_log_level,
			"----- %c%c%c%c:%d service count %d %s",
			VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
			service_data[i].clientid,
			service_data[i].use_count,
			service_data[i].use_count ? nz : "");
	}
	vchiq_log_warning(vchiq_susp_log_level,
		"----- VCHIQ use count count %d", peer_count);
	vchiq_log_warning(vchiq_susp_log_level,
		"--- Overall vchiq instance use count %d", vc_use_count);

	kfree(service_data);
}
2623 
2624 enum vchiq_status
2625 vchiq_check_service(struct vchiq_service *service)
2626 {
2627 	struct vchiq_arm_state *arm_state;
2628 	enum vchiq_status ret = VCHIQ_ERROR;
2629 
2630 	if (!service || !service->state)
2631 		goto out;
2632 
2633 	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2634 
2635 	arm_state = vchiq_platform_get_arm_state(service->state);
2636 
2637 	read_lock_bh(&arm_state->susp_res_lock);
2638 	if (service->service_use_count)
2639 		ret = VCHIQ_SUCCESS;
2640 	read_unlock_bh(&arm_state->susp_res_lock);
2641 
2642 	if (ret == VCHIQ_ERROR) {
2643 		vchiq_log_error(vchiq_susp_log_level,
2644 			"%s ERROR - %c%c%c%c:%d service count %d, state count %d", __func__,
2645 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2646 			service->client_id, service->service_use_count,
2647 			arm_state->videocore_use_count);
2648 		vchiq_dump_service_use_state(service->state);
2649 	}
2650 out:
2651 	return ret;
2652 }
2653 
2654 void vchiq_platform_conn_state_changed(struct vchiq_state *state,
2655 				       enum vchiq_connstate oldstate,
2656 				       enum vchiq_connstate newstate)
2657 {
2658 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2659 	char threadname[16];
2660 
2661 	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
2662 		get_conn_state_name(oldstate), get_conn_state_name(newstate));
2663 	if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
2664 		return;
2665 
2666 	write_lock_bh(&arm_state->susp_res_lock);
2667 	if (arm_state->first_connect) {
2668 		write_unlock_bh(&arm_state->susp_res_lock);
2669 		return;
2670 	}
2671 
2672 	arm_state->first_connect = 1;
2673 	write_unlock_bh(&arm_state->susp_res_lock);
2674 	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
2675 		 state->id);
2676 	arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
2677 					      (void *)state,
2678 					      threadname);
2679 	if (IS_ERR(arm_state->ka_thread)) {
2680 		vchiq_log_error(vchiq_susp_log_level,
2681 				"vchiq: FATAL: couldn't create thread %s",
2682 				threadname);
2683 	} else {
2684 		wake_up_process(arm_state->ka_thread);
2685 	}
2686 }
2687 
/* Device-tree match table; .data selects the per-SoC driver data. */
static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_drvdata },
	{ .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_drvdata },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);
2694 
2695 static struct platform_device *
2696 vchiq_register_child(struct platform_device *pdev, const char *name)
2697 {
2698 	struct platform_device_info pdevinfo;
2699 	struct platform_device *child;
2700 
2701 	memset(&pdevinfo, 0, sizeof(pdevinfo));
2702 
2703 	pdevinfo.parent = &pdev->dev;
2704 	pdevinfo.name = name;
2705 	pdevinfo.id = PLATFORM_DEVID_NONE;
2706 	pdevinfo.dma_mask = DMA_BIT_MASK(32);
2707 
2708 	child = platform_device_register_full(&pdevinfo);
2709 	if (IS_ERR(child)) {
2710 		dev_warn(&pdev->dev, "%s not registered\n", name);
2711 		child = NULL;
2712 	}
2713 
2714 	return child;
2715 }
2716 
2717 static int vchiq_probe(struct platform_device *pdev)
2718 {
2719 	struct device_node *fw_node;
2720 	const struct of_device_id *of_id;
2721 	struct vchiq_drvdata *drvdata;
2722 	struct device *vchiq_dev;
2723 	int err;
2724 
2725 	of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
2726 	drvdata = (struct vchiq_drvdata *)of_id->data;
2727 	if (!drvdata)
2728 		return -EINVAL;
2729 
2730 	fw_node = of_find_compatible_node(NULL, NULL,
2731 					  "raspberrypi,bcm2835-firmware");
2732 	if (!fw_node) {
2733 		dev_err(&pdev->dev, "Missing firmware node\n");
2734 		return -ENOENT;
2735 	}
2736 
2737 	drvdata->fw = rpi_firmware_get(fw_node);
2738 	of_node_put(fw_node);
2739 	if (!drvdata->fw)
2740 		return -EPROBE_DEFER;
2741 
2742 	platform_set_drvdata(pdev, drvdata);
2743 
2744 	err = vchiq_platform_init(pdev, &g_state);
2745 	if (err)
2746 		goto failed_platform_init;
2747 
2748 	cdev_init(&vchiq_cdev, &vchiq_fops);
2749 	vchiq_cdev.owner = THIS_MODULE;
2750 	err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
2751 	if (err) {
2752 		vchiq_log_error(vchiq_arm_log_level,
2753 			"Unable to register device");
2754 		goto failed_platform_init;
2755 	}
2756 
2757 	vchiq_dev = device_create(vchiq_class, &pdev->dev, vchiq_devid, NULL,
2758 				  "vchiq");
2759 	if (IS_ERR(vchiq_dev)) {
2760 		err = PTR_ERR(vchiq_dev);
2761 		goto failed_device_create;
2762 	}
2763 
2764 	vchiq_debugfs_init();
2765 
2766 	vchiq_log_info(vchiq_arm_log_level,
2767 		"vchiq: initialised - version %d (min %d), device %d.%d",
2768 		VCHIQ_VERSION, VCHIQ_VERSION_MIN,
2769 		MAJOR(vchiq_devid), MINOR(vchiq_devid));
2770 
2771 	bcm2835_camera = vchiq_register_child(pdev, "bcm2835-camera");
2772 	bcm2835_audio = vchiq_register_child(pdev, "bcm2835_audio");
2773 
2774 	return 0;
2775 
2776 failed_device_create:
2777 	cdev_del(&vchiq_cdev);
2778 failed_platform_init:
2779 	vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
2780 	return err;
2781 }
2782 
/* Unbind: tear down what vchiq_probe() created, in reverse order. */
static int vchiq_remove(struct platform_device *pdev)
{
	platform_device_unregister(bcm2835_audio);
	platform_device_unregister(bcm2835_camera);
	vchiq_debugfs_deinit();
	device_destroy(vchiq_class, vchiq_devid);
	cdev_del(&vchiq_cdev);

	return 0;
}
2793 
/* Platform driver matching the bcm2835/bcm2836 vchiq DT nodes. */
static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove = vchiq_remove,
};
2802 
/*
 * Module init: create the device class, reserve a char-device region,
 * then register the platform driver.  Failures unwind via the usual
 * goto chain in reverse order of acquisition.
 */
static int __init vchiq_driver_init(void)
{
	int ret;

	vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
	if (IS_ERR(vchiq_class)) {
		pr_err("Failed to create vchiq class\n");
		return PTR_ERR(vchiq_class);
	}

	ret = alloc_chrdev_region(&vchiq_devid, 0, 1, DEVICE_NAME);
	if (ret) {
		pr_err("Failed to allocate vchiq's chrdev region\n");
		goto class_destroy;
	}

	ret = platform_driver_register(&vchiq_driver);
	if (ret) {
		pr_err("Failed to register vchiq driver\n");
		goto region_unregister;
	}

	return 0;

region_unregister:
	unregister_chrdev_region(vchiq_devid, 1);

class_destroy:
	class_destroy(vchiq_class);

	return ret;
}
module_init(vchiq_driver_init);
2836 
/* Module exit: unwind vchiq_driver_init() in reverse order. */
static void __exit vchiq_driver_exit(void)
{
	platform_driver_unregister(&vchiq_driver);
	unregister_chrdev_region(vchiq_devid, 1);
	class_destroy(vchiq_class);
}
module_exit(vchiq_driver_exit);
2844 
/* Module metadata. */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");
2848