1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
4  * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5  */
6 
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/sched/signal.h>
10 #include <linux/types.h>
11 #include <linux/errno.h>
12 #include <linux/cdev.h>
13 #include <linux/fs.h>
14 #include <linux/device.h>
15 #include <linux/mm.h>
16 #include <linux/highmem.h>
17 #include <linux/pagemap.h>
18 #include <linux/bug.h>
19 #include <linux/completion.h>
20 #include <linux/list.h>
21 #include <linux/of.h>
22 #include <linux/platform_device.h>
23 #include <linux/compat.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/rcupdate.h>
26 #include <linux/delay.h>
27 #include <linux/slab.h>
28 #include <soc/bcm2835/raspberrypi-firmware.h>
29 
30 #include "vchiq_core.h"
31 #include "vchiq_ioctl.h"
32 #include "vchiq_arm.h"
33 #include "vchiq_debugfs.h"
34 
35 #define DEVICE_NAME "vchiq"
36 
37 /* Override the default prefix, which would be vchiq_arm (from the filename) */
38 #undef MODULE_PARAM_PREFIX
39 #define MODULE_PARAM_PREFIX DEVICE_NAME "."
40 
41 /* Some per-instance constants */
42 #define MAX_COMPLETIONS 128
43 #define MAX_SERVICES 64
44 #define MAX_ELEMENTS 8
45 #define MSG_QUEUE_SIZE 128
46 
47 #define KEEPALIVE_VER 1
48 #define KEEPALIVE_VER_MIN KEEPALIVE_VER
49 
50 /* Run time control of log level, based on KERN_XXX level. */
51 int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
52 int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
53 
/*
 * Per-service state for services created through this interface.
 * Installed as vchiq_service::base.userdata and freed by
 * user_service_free() when the service is terminated.
 */
struct user_service {
	struct vchiq_service *service;
	void __user *userdata;		/* opaque per-service pointer supplied by the client */
	struct vchiq_instance *instance;
	char is_vchi;			/* non-zero: message headers go via msg_queue */
	char dequeue_pending;		/* a thread is blocked in DEQUEUE_MESSAGE */
	char close_pending;		/* CLOSED completion awaiting CLOSE_DELIVERED ack */
	int message_available_pos;	/* completion_insert value of the last
					 * MESSAGE_AVAILABLE completion queued */
	int msg_insert;			/* msg_queue insert index (free-running) */
	int msg_remove;			/* msg_queue remove index (free-running) */
	struct completion insert_event;	/* signalled when a header is queued */
	struct completion remove_event;	/* signalled when a header is dequeued */
	struct completion close_event;	/* signalled by close_delivered() */
	struct vchiq_header *msg_queue[MSG_QUEUE_SIZE];
};
69 
/*
 * Tracks an outstanding blocking bulk transfer so that an interrupted
 * transfer can later be resumed by the same thread (matched by pid).
 */
struct bulk_waiter_node {
	struct bulk_waiter bulk_waiter;
	int pid;			/* pid of the thread that started the transfer */
	struct list_head list;		/* entry in vchiq_instance::bulk_waiter_list */
};
75 
/* Per-client connection state (see vchiq_initialise()). */
struct vchiq_instance {
	struct vchiq_state *state;
	/*
	 * Ring buffer of completion records for delivery to the client.
	 * Indices are free-running and masked with (MAX_COMPLETIONS - 1)
	 * on access; insert is advanced by add_completion(), remove by
	 * the AWAIT_COMPLETION path.
	 */
	struct vchiq_completion_data_kernel completions[MAX_COMPLETIONS];
	int completion_insert;
	int completion_remove;
	struct completion insert_event;		/* a completion was queued */
	struct completion remove_event;		/* a completion was consumed */
	struct mutex completion_mutex;

	int connected;		/* set once vchiq_connect() succeeds */
	int closing;		/* instance is shutting down; callbacks drain */
	int pid;
	int mark;
	int use_close_delivered;	/* client acks CLOSED via CLOSE_DELIVERED */
	int trace;

	/* Waiters parked by interrupted blocking bulk transfers. */
	struct list_head bulk_waiter_list;
	struct mutex bulk_waiter_list_mutex;

	struct vchiq_debugfs_node debugfs_node;
};
97 
/*
 * Context for dumping state into a userspace buffer.
 * NOTE(review): field semantics inferred from names only; the dump
 * implementation is outside this chunk - verify there.
 */
struct dump_context {
	char __user *buf;
	size_t actual;
	size_t space;
	loff_t offset;
};
104 
/* Character device bookkeeping for the vchiq device node. */
static struct cdev    vchiq_cdev;
static dev_t          vchiq_devid;
/* The single global VCHIQ state shared by all instances. */
static struct vchiq_state g_state;
static struct class  *vchiq_class;
/* Protects every user_service's msg_queue and its insert/remove indices. */
static DEFINE_SPINLOCK(msg_queue_spinlock);
static struct platform_device *bcm2835_camera;
static struct platform_device *bcm2835_audio;

/*
 * Per-SoC cache line sizes.  NOTE(review): presumably selected via the
 * platform/OF match table elsewhere in this file - confirm.
 */
static struct vchiq_drvdata bcm2835_drvdata = {
	.cache_line_size = 32,
};

static struct vchiq_drvdata bcm2836_drvdata = {
	.cache_line_size = 64,
};
120 
/*
 * Human-readable names of the vchiq ioctls, indexed by ioctl number.
 * Must stay in sync with the ioctl definitions - enforced by the
 * static_assert below.
 */
static const char *const ioctl_names[] = {
	"CONNECT",
	"SHUTDOWN",
	"CREATE_SERVICE",
	"REMOVE_SERVICE",
	"QUEUE_MESSAGE",
	"QUEUE_BULK_TRANSMIT",
	"QUEUE_BULK_RECEIVE",
	"AWAIT_COMPLETION",
	"DEQUEUE_MESSAGE",
	"GET_CLIENT_ID",
	"GET_CONFIG",
	"CLOSE_SERVICE",
	"USE_SERVICE",
	"RELEASE_SERVICE",
	"SET_SERVICE_OPTION",
	"DUMP_PHYS_MEM",
	"LIB_VERSION",
	"CLOSE_DELIVERED"
};

static_assert(ARRAY_SIZE(ioctl_names) == (VCHIQ_IOC_MAX + 1));
143 
/* Forward declaration - defined after the bulk transmit/receive wrappers. */
static enum vchiq_status
vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
	unsigned int size, enum vchiq_bulk_dir dir);

/* Maximum number of 500-600us polls waiting for the VideoCore to come up. */
#define VCHIQ_INIT_RETRIES 10
/*
 * vchiq_initialise - allocate a new vchiq_instance bound to the global
 * VCHIQ state and return it in *instance_out.
 *
 * Polls briefly for the VideoCore to come up, since it may not be ready
 * at boot.  Returns 0 on success, -ENOTCONN if the VideoCore never
 * initialised, or -ENOMEM on allocation failure.
 */
int vchiq_initialise(struct vchiq_instance **instance_out)
{
	struct vchiq_state *state;
	struct vchiq_instance *instance = NULL;
	int i, ret;

	/*
	 * VideoCore may not be ready due to boot up timing.
	 * It may never be ready if kernel and firmware are mismatched, so
	 * don't block forever.
	 */
	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
		state = vchiq_get_state();
		if (state)
			break;
		usleep_range(500, 600);
	}
	if (i == VCHIQ_INIT_RETRIES) {
		vchiq_log_error(vchiq_core_log_level,
			"%s: videocore not initialized\n", __func__);
		ret = -ENOTCONN;
		goto failed;
	} else if (i > 0) {
		vchiq_log_warning(vchiq_core_log_level,
			"%s: videocore initialized after %d retries\n",
			__func__, i);
	}

	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance) {
		vchiq_log_error(vchiq_core_log_level,
			"%s: error allocating vchiq instance\n", __func__);
		ret = -ENOMEM;
		goto failed;
	}

	/* kzalloc() has already zeroed the remaining fields. */
	instance->connected = 0;
	instance->state = state;
	mutex_init(&instance->bulk_waiter_list_mutex);
	INIT_LIST_HEAD(&instance->bulk_waiter_list);

	*instance_out = instance;

	ret = 0;

failed:
	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p): returning %d", __func__, instance, ret);

	return ret;
}
EXPORT_SYMBOL(vchiq_initialise);
201 
202 static void free_bulk_waiter(struct vchiq_instance *instance)
203 {
204 	struct bulk_waiter_node *waiter, *next;
205 
206 	list_for_each_entry_safe(waiter, next,
207 				 &instance->bulk_waiter_list, list) {
208 		list_del(&waiter->list);
209 		vchiq_log_info(vchiq_arm_log_level,
210 				"bulk_waiter - cleaned up %pK for pid %d",
211 				waiter, waiter->pid);
212 		kfree(waiter);
213 	}
214 }
215 
216 enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance)
217 {
218 	enum vchiq_status status = VCHIQ_SUCCESS;
219 	struct vchiq_state *state = instance->state;
220 
221 	if (mutex_lock_killable(&state->mutex))
222 		return VCHIQ_RETRY;
223 
224 	/* Remove all services */
225 	vchiq_shutdown_internal(state, instance);
226 
227 	mutex_unlock(&state->mutex);
228 
229 	vchiq_log_trace(vchiq_core_log_level,
230 		"%s(%p): returning %d", __func__, instance, status);
231 
232 	free_bulk_waiter(instance);
233 	kfree(instance);
234 
235 	return status;
236 }
237 EXPORT_SYMBOL(vchiq_shutdown);
238 
239 static int vchiq_is_connected(struct vchiq_instance *instance)
240 {
241 	return instance->connected;
242 }
243 
244 enum vchiq_status vchiq_connect(struct vchiq_instance *instance)
245 {
246 	enum vchiq_status status;
247 	struct vchiq_state *state = instance->state;
248 
249 	if (mutex_lock_killable(&state->mutex)) {
250 		vchiq_log_trace(vchiq_core_log_level,
251 			"%s: call to mutex_lock failed", __func__);
252 		status = VCHIQ_RETRY;
253 		goto failed;
254 	}
255 	status = vchiq_connect_internal(state, instance);
256 
257 	if (status == VCHIQ_SUCCESS)
258 		instance->connected = 1;
259 
260 	mutex_unlock(&state->mutex);
261 
262 failed:
263 	vchiq_log_trace(vchiq_core_log_level,
264 		"%s(%p): returning %d", __func__, instance, status);
265 
266 	return status;
267 }
268 EXPORT_SYMBOL(vchiq_connect);
269 
270 static enum vchiq_status
271 vchiq_add_service(struct vchiq_instance *instance,
272 		  const struct vchiq_service_params_kernel *params,
273 		  unsigned int *phandle)
274 {
275 	enum vchiq_status status;
276 	struct vchiq_state *state = instance->state;
277 	struct vchiq_service *service = NULL;
278 	int srvstate;
279 
280 	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
281 
282 	srvstate = vchiq_is_connected(instance)
283 		? VCHIQ_SRVSTATE_LISTENING
284 		: VCHIQ_SRVSTATE_HIDDEN;
285 
286 	service = vchiq_add_service_internal(
287 		state,
288 		params,
289 		srvstate,
290 		instance,
291 		NULL);
292 
293 	if (service) {
294 		*phandle = service->handle;
295 		status = VCHIQ_SUCCESS;
296 	} else {
297 		status = VCHIQ_ERROR;
298 	}
299 
300 	vchiq_log_trace(vchiq_core_log_level,
301 		"%s(%p): returning %d", __func__, instance, status);
302 
303 	return status;
304 }
305 
306 enum vchiq_status
307 vchiq_open_service(struct vchiq_instance *instance,
308 		   const struct vchiq_service_params_kernel *params,
309 		   unsigned int *phandle)
310 {
311 	enum vchiq_status   status = VCHIQ_ERROR;
312 	struct vchiq_state   *state = instance->state;
313 	struct vchiq_service *service = NULL;
314 
315 	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
316 
317 	if (!vchiq_is_connected(instance))
318 		goto failed;
319 
320 	service = vchiq_add_service_internal(state,
321 		params,
322 		VCHIQ_SRVSTATE_OPENING,
323 		instance,
324 		NULL);
325 
326 	if (service) {
327 		*phandle = service->handle;
328 		status = vchiq_open_service_internal(service, current->pid);
329 		if (status != VCHIQ_SUCCESS) {
330 			vchiq_remove_service(service->handle);
331 			*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
332 		}
333 	}
334 
335 failed:
336 	vchiq_log_trace(vchiq_core_log_level,
337 		"%s(%p): returning %d", __func__, instance, status);
338 
339 	return status;
340 }
341 EXPORT_SYMBOL(vchiq_open_service);
342 
343 enum vchiq_status
344 vchiq_bulk_transmit(unsigned int handle, const void *data, unsigned int size,
345 		    void *userdata, enum vchiq_bulk_mode mode)
346 {
347 	enum vchiq_status status;
348 
349 	while (1) {
350 		switch (mode) {
351 		case VCHIQ_BULK_MODE_NOCALLBACK:
352 		case VCHIQ_BULK_MODE_CALLBACK:
353 			status = vchiq_bulk_transfer(handle,
354 						     (void *)data, NULL,
355 						     size, userdata, mode,
356 						     VCHIQ_BULK_TRANSMIT);
357 			break;
358 		case VCHIQ_BULK_MODE_BLOCKING:
359 			status = vchiq_blocking_bulk_transfer(handle,
360 				(void *)data, size, VCHIQ_BULK_TRANSMIT);
361 			break;
362 		default:
363 			return VCHIQ_ERROR;
364 		}
365 
366 		/*
367 		 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
368 		 * to implement a retry mechanism since this function is
369 		 * supposed to block until queued
370 		 */
371 		if (status != VCHIQ_RETRY)
372 			break;
373 
374 		msleep(1);
375 	}
376 
377 	return status;
378 }
379 EXPORT_SYMBOL(vchiq_bulk_transmit);
380 
381 enum vchiq_status vchiq_bulk_receive(unsigned int handle, void *data,
382 				     unsigned int size, void *userdata,
383 				     enum vchiq_bulk_mode mode)
384 {
385 	enum vchiq_status status;
386 
387 	while (1) {
388 		switch (mode) {
389 		case VCHIQ_BULK_MODE_NOCALLBACK:
390 		case VCHIQ_BULK_MODE_CALLBACK:
391 			status = vchiq_bulk_transfer(handle, data, NULL,
392 						     size, userdata,
393 						     mode, VCHIQ_BULK_RECEIVE);
394 			break;
395 		case VCHIQ_BULK_MODE_BLOCKING:
396 			status = vchiq_blocking_bulk_transfer(handle,
397 				(void *)data, size, VCHIQ_BULK_RECEIVE);
398 			break;
399 		default:
400 			return VCHIQ_ERROR;
401 		}
402 
403 		/*
404 		 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
405 		 * to implement a retry mechanism since this function is
406 		 * supposed to block until queued
407 		 */
408 		if (status != VCHIQ_RETRY)
409 			break;
410 
411 		msleep(1);
412 	}
413 
414 	return status;
415 }
416 EXPORT_SYMBOL(vchiq_bulk_receive);
417 
/*
 * Perform a bulk transfer in VCHIQ_BULK_MODE_BLOCKING on behalf of the
 * calling thread.
 *
 * A bulk_waiter_node keyed by pid is kept on the instance's
 * bulk_waiter_list while a transfer is outstanding, so that a transfer
 * interrupted with VCHIQ_RETRY can be resumed by a later call from the
 * same thread.
 *
 * Returns VCHIQ_SUCCESS, VCHIQ_ERROR, or VCHIQ_RETRY if interrupted.
 */
static enum vchiq_status
vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size,
			     enum vchiq_bulk_dir dir)
{
	struct vchiq_instance *instance;
	struct vchiq_service *service;
	enum vchiq_status status;
	struct bulk_waiter_node *waiter = NULL;
	bool found = false;

	service = find_service_by_handle(handle);
	if (!service)
		return VCHIQ_ERROR;

	instance = service->instance;

	/* The service reference was only needed to reach the instance. */
	vchiq_service_put(service);

	/* Look for a waiter parked by an earlier interrupted call. */
	mutex_lock(&instance->bulk_waiter_list_mutex);
	list_for_each_entry(waiter, &instance->bulk_waiter_list, list) {
		if (waiter->pid == current->pid) {
			list_del(&waiter->list);
			found = true;
			break;
		}
	}
	mutex_unlock(&instance->bulk_waiter_list_mutex);

	if (found) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* This thread has an outstanding bulk transfer. */
			/* FIXME: why compare a dma address to a pointer? */
			if ((bulk->data != (dma_addr_t)(uintptr_t)data) ||
				(bulk->size != size)) {
				/*
				 * This is not a retry of the previous one.
				 * Cancel the signal when the transfer completes.
				 */
				spin_lock(&bulk_waiter_spinlock);
				bulk->userdata = NULL;
				spin_unlock(&bulk_waiter_spinlock);
			}
		}
	} else {
		/* First attempt from this thread - allocate a fresh waiter. */
		waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
		if (!waiter) {
			vchiq_log_error(vchiq_core_log_level,
				"%s - out of memory", __func__);
			return VCHIQ_ERROR;
		}
	}

	status = vchiq_bulk_transfer(handle, data, NULL, size,
				     &waiter->bulk_waiter,
				     VCHIQ_BULK_MODE_BLOCKING, dir);
	if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
		!waiter->bulk_waiter.bulk) {
		/* Transfer finished (or cannot be resumed) - drop the waiter. */
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* Cancel the signal when the transfer completes. */
			spin_lock(&bulk_waiter_spinlock);
			bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		kfree(waiter);
	} else {
		/*
		 * Interrupted with the transfer still outstanding: park the
		 * waiter so a retry from this pid picks it up again.
		 */
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		vchiq_log_info(vchiq_arm_log_level,
				"saved bulk_waiter %pK for pid %d",
				waiter, current->pid);
	}

	return status;
}
498 
/*
 * Queue a completion record on @instance for delivery to the client via
 * AWAIT_COMPLETION.
 *
 * completion_insert is advanced only here; completion_remove is
 * advanced by the AWAIT_COMPLETION path.  If the ring is full, block
 * (interruptibly) until the client catches up.
 *
 * Returns VCHIQ_SUCCESS, or VCHIQ_RETRY if the wait was interrupted.
 */
static enum vchiq_status
add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
	       struct vchiq_header *header, struct user_service *user_service,
	       void *bulk_userdata)
{
	struct vchiq_completion_data_kernel *completion;
	int insert;

	DEBUG_INITIALISE(g_state.local)

	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level,
			"%s - completion queue full", __func__);
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (wait_for_completion_interruptible(
					&instance->remove_event)) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback interrupted");
			return VCHIQ_RETRY;
		} else if (instance->closing) {
			/* Instance is shutting down - drop the completion. */
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback closing");
			return VCHIQ_SUCCESS;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	/* MAX_COMPLETIONS is a power of two, so the mask wraps the index. */
	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/*
		 * Take an extra reference, to be held until
		 * this CLOSED notification is delivered.
		 */
		vchiq_service_get(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/*
	 * A write barrier is needed here to ensure that the entire completion
	 * record is written out before the insert point.
	 */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	complete(&instance->insert_event);

	return VCHIQ_SUCCESS;
}
563 
/*
 * Callback registered for services created through this interface.
 * Routes events to the owning instance's completion queue and, for
 * VCHI-style services, queues message headers on the per-service
 * msg_queue.  May block (interruptibly) when those queues are full.
 */
static enum vchiq_status
service_callback(enum vchiq_reason reason, struct vchiq_header *header,
		 unsigned int handle, void *bulk_userdata)
{
	/*
	 * How do we ensure the callback goes to the right client?
	 * The service_user data points to a user_service record
	 * containing the original callback and the user state structure, which
	 * contains a circular buffer for completion records.
	 */
	struct user_service *user_service;
	struct vchiq_service *service;
	struct vchiq_instance *instance;
	bool skip_completion = false;

	DEBUG_INITIALISE(g_state.local)

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	service = handle_to_service(handle);
	if (WARN_ON(!service))
		return VCHIQ_SUCCESS;

	user_service = (struct user_service *)service->base.userdata;
	instance = user_service->instance;

	/* Nothing to deliver once the instance is gone or shutting down. */
	if (!instance || instance->closing)
		return VCHIQ_SUCCESS;

	vchiq_log_trace(vchiq_arm_log_level,
		"%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
		__func__, (unsigned long)user_service,
		service->localport, user_service->userdata,
		reason, (unsigned long)header,
		(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			/* msg_queue is full - wait for the client to dequeue. */
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level,
				"service_callback - msg queue full");
			/*
			 * If there is no MESSAGE_AVAILABLE in the completion
			 * queue, add one
			 */
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				enum vchiq_status status;

				vchiq_log_info(vchiq_arm_log_level,
					"Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason,
					NULL, user_service, bulk_userdata);
				if (status != VCHIQ_SUCCESS) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			if (wait_for_completion_interruptible(
						&user_service->remove_event)) {
				vchiq_log_info(vchiq_arm_log_level,
					"%s interrupted", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_RETRY;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level,
					"%s closing", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_ERROR;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/*
		 * If there is a thread waiting in DEQUEUE_MESSAGE, or if
		 * there is a MESSAGE_AVAILABLE in the completion queue then
		 * bypass the completion queue.
		 */
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = true;
		}

		spin_unlock(&msg_queue_spinlock);
		complete(&user_service->insert_event);

		/* The header now lives in msg_queue; don't also queue it below. */
		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	if (skip_completion)
		return VCHIQ_SUCCESS;

	return add_completion(instance, reason, header, user_service,
		bulk_userdata);
}
674 
/*
 * Termination callback passed to vchiq_add_service_internal(); frees
 * the user_service allocated in vchiq_ioc_create_service().
 */
static void
user_service_free(void *userdata)
{
	kfree(userdata);
}
680 
681 static void close_delivered(struct user_service *user_service)
682 {
683 	vchiq_log_info(vchiq_arm_log_level,
684 		"%s(handle=%x)",
685 		__func__, user_service->service->handle);
686 
687 	if (user_service->close_pending) {
688 		/* Allow the underlying service to be culled */
689 		vchiq_service_put(user_service->service);
690 
691 		/* Wake the user-thread blocked in close_ or remove_service */
692 		complete(&user_service->close_event);
693 
694 		user_service->close_pending = 0;
695 	}
696 }
697 
/*
 * Cursor over a userspace scatter list of vchiq_elements, used by
 * vchiq_ioc_copy_element_data() to stream payload into message slots.
 */
struct vchiq_io_copy_callback_context {
	struct vchiq_element *element;	/* current element */
	size_t element_offset;		/* bytes already consumed from it */
	unsigned long elements_to_go;	/* elements remaining (incl. current) */
};
703 
704 static ssize_t vchiq_ioc_copy_element_data(void *context, void *dest,
705 					   size_t offset, size_t maxsize)
706 {
707 	struct vchiq_io_copy_callback_context *cc = context;
708 	size_t total_bytes_copied = 0;
709 	size_t bytes_this_round;
710 
711 	while (total_bytes_copied < maxsize) {
712 		if (!cc->elements_to_go)
713 			return total_bytes_copied;
714 
715 		if (!cc->element->size) {
716 			cc->elements_to_go--;
717 			cc->element++;
718 			cc->element_offset = 0;
719 			continue;
720 		}
721 
722 		bytes_this_round = min(cc->element->size - cc->element_offset,
723 				       maxsize - total_bytes_copied);
724 
725 		if (copy_from_user(dest + total_bytes_copied,
726 				  cc->element->data + cc->element_offset,
727 				  bytes_this_round))
728 			return -EFAULT;
729 
730 		cc->element_offset += bytes_this_round;
731 		total_bytes_copied += bytes_this_round;
732 
733 		if (cc->element_offset == cc->element->size) {
734 			cc->elements_to_go--;
735 			cc->element++;
736 			cc->element_offset = 0;
737 		}
738 	}
739 
740 	return maxsize;
741 }
742 
743 static int
744 vchiq_ioc_queue_message(unsigned int handle, struct vchiq_element *elements,
745 			unsigned long count)
746 {
747 	struct vchiq_io_copy_callback_context context;
748 	enum vchiq_status status = VCHIQ_SUCCESS;
749 	unsigned long i;
750 	size_t total_size = 0;
751 
752 	context.element = elements;
753 	context.element_offset = 0;
754 	context.elements_to_go = count;
755 
756 	for (i = 0; i < count; i++) {
757 		if (!elements[i].data && elements[i].size != 0)
758 			return -EFAULT;
759 
760 		total_size += elements[i].size;
761 	}
762 
763 	status = vchiq_queue_message(handle, vchiq_ioc_copy_element_data,
764 				     &context, total_size);
765 
766 	if (status == VCHIQ_ERROR)
767 		return -EIO;
768 	else if (status == VCHIQ_RETRY)
769 		return -EINTR;
770 	return 0;
771 }
772 
773 static int vchiq_ioc_create_service(struct vchiq_instance *instance,
774 				    struct vchiq_create_service *args)
775 {
776 	struct user_service *user_service = NULL;
777 	struct vchiq_service *service;
778 	enum vchiq_status status = VCHIQ_SUCCESS;
779 	struct vchiq_service_params_kernel params;
780 	int srvstate;
781 
782 	user_service = kmalloc(sizeof(*user_service), GFP_KERNEL);
783 	if (!user_service)
784 		return -ENOMEM;
785 
786 	if (args->is_open) {
787 		if (!instance->connected) {
788 			kfree(user_service);
789 			return -ENOTCONN;
790 		}
791 		srvstate = VCHIQ_SRVSTATE_OPENING;
792 	} else {
793 		srvstate = instance->connected ?
794 			 VCHIQ_SRVSTATE_LISTENING : VCHIQ_SRVSTATE_HIDDEN;
795 	}
796 
797 	params = (struct vchiq_service_params_kernel) {
798 		.fourcc   = args->params.fourcc,
799 		.callback = service_callback,
800 		.userdata = user_service,
801 		.version  = args->params.version,
802 		.version_min = args->params.version_min,
803 	};
804 	service = vchiq_add_service_internal(instance->state, &params,
805 					     srvstate, instance,
806 					     user_service_free);
807 	if (!service) {
808 		kfree(user_service);
809 		return -EEXIST;
810 	}
811 
812 	user_service->service = service;
813 	user_service->userdata = args->params.userdata;
814 	user_service->instance = instance;
815 	user_service->is_vchi = (args->is_vchi != 0);
816 	user_service->dequeue_pending = 0;
817 	user_service->close_pending = 0;
818 	user_service->message_available_pos = instance->completion_remove - 1;
819 	user_service->msg_insert = 0;
820 	user_service->msg_remove = 0;
821 	init_completion(&user_service->insert_event);
822 	init_completion(&user_service->remove_event);
823 	init_completion(&user_service->close_event);
824 
825 	if (args->is_open) {
826 		status = vchiq_open_service_internal(service, instance->pid);
827 		if (status != VCHIQ_SUCCESS) {
828 			vchiq_remove_service(service->handle);
829 			return (status == VCHIQ_RETRY) ?
830 				-EINTR : -EIO;
831 		}
832 	}
833 	args->handle = service->handle;
834 
835 	return 0;
836 }
837 
/*
 * DEQUEUE_MESSAGE ioctl helper: remove the next header from a
 * VCHI-style service's msg_queue, optionally blocking and optionally
 * copying the payload out to args->buf.
 *
 * Returns the message size on success, -EWOULDBLOCK if non-blocking and
 * empty, -EINTR if interrupted, -ENOTCONN if a NULL header was queued,
 * -EMSGSIZE if the user buffer is too small, -EFAULT/-EINVAL otherwise.
 */
static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
				     struct vchiq_dequeue_message *args)
{
	struct user_service *user_service;
	struct vchiq_service *service;
	struct vchiq_header *header;
	int ret;

	DEBUG_INITIALISE(g_state.local)
	DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
	service = find_service_for_instance(instance, args->handle);
	if (!service)
		return -EINVAL;

	user_service = (struct user_service *)service->base.userdata;
	if (user_service->is_vchi == 0) {
		/* Only VCHI-style services queue headers in msg_queue. */
		ret = -EINVAL;
		goto out;
	}

	spin_lock(&msg_queue_spinlock);
	if (user_service->msg_remove == user_service->msg_insert) {
		if (!args->blocking) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
			ret = -EWOULDBLOCK;
			goto out;
		}
		/* Let service_callback() bypass the completion queue. */
		user_service->dequeue_pending = 1;
		ret = 0;
		do {
			/* Drop the lock while sleeping for an insertion. */
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
			if (wait_for_completion_interruptible(
				&user_service->insert_event)) {
				vchiq_log_info(vchiq_arm_log_level,
					"DEQUEUE_MESSAGE interrupted");
				ret = -EINTR;
				break;
			}
			spin_lock(&msg_queue_spinlock);
		} while (user_service->msg_remove == user_service->msg_insert);

		if (ret)
			goto out;
	}

	/* The remove index must never overtake the insert index. */
	if (WARN_ON_ONCE((int)(user_service->msg_insert -
			 user_service->msg_remove) < 0)) {
		spin_unlock(&msg_queue_spinlock);
		ret = -EINVAL;
		goto out;
	}

	header = user_service->msg_queue[user_service->msg_remove &
		(MSG_QUEUE_SIZE - 1)];
	user_service->msg_remove++;
	spin_unlock(&msg_queue_spinlock);

	/* Wake a service_callback() blocked on a full msg_queue. */
	complete(&user_service->remove_event);
	if (!header) {
		ret = -ENOTCONN;
	} else if (header->size <= args->bufsize) {
		/* Copy to user space if msgbuf is not NULL */
		if (!args->buf || (copy_to_user(args->buf,
					header->data, header->size) == 0)) {
			ret = header->size;
			vchiq_release_message(service->handle, header);
		} else {
			ret = -EFAULT;
		}
	} else {
		vchiq_log_error(vchiq_arm_log_level,
			"header %pK: bufsize %x < size %x",
			header, args->bufsize, header->size);
		WARN(1, "invalid size\n");
		ret = -EMSGSIZE;
	}
	DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
out:
	vchiq_service_put(service);
	return ret;
}
921 
/*
 * QUEUE_BULK_TRANSMIT / QUEUE_BULK_RECEIVE ioctl helper.
 *
 * BLOCKING mode allocates a bulk_waiter; if the transfer is interrupted
 * while still outstanding, the waiter is parked on the instance's
 * bulk_waiter_list and the client's mode is rewritten to WAITING via
 * put_user(), so a retry in VCHIQ_BULK_MODE_WAITING picks the parked
 * waiter up again (matched by pid).
 *
 * Returns 0 on success, or a negative errno.
 */
static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
				      struct vchiq_queue_bulk_transfer *args,
				      enum vchiq_bulk_dir dir,
				      enum vchiq_bulk_mode __user *mode)
{
	struct vchiq_service *service;
	struct bulk_waiter_node *waiter = NULL;
	bool found = false;
	void *userdata;
	int status = 0;
	int ret;

	service = find_service_for_instance(instance, args->handle);
	if (!service)
		return -EINVAL;

	if (args->mode == VCHIQ_BULK_MODE_BLOCKING) {
		/* Fresh blocking transfer - allocate a new waiter. */
		waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
		if (!waiter) {
			ret = -ENOMEM;
			goto out;
		}

		userdata = &waiter->bulk_waiter;
	} else if (args->mode == VCHIQ_BULK_MODE_WAITING) {
		/* Resume the waiter parked by an earlier interrupted call. */
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_for_each_entry(waiter, &instance->bulk_waiter_list,
				    list) {
			if (waiter->pid == current->pid) {
				list_del(&waiter->list);
				found = true;
				break;
			}
		}
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		if (!found) {
			vchiq_log_error(vchiq_arm_log_level,
				"no bulk_waiter found for pid %d",
				current->pid);
			ret = -ESRCH;
			goto out;
		}
		vchiq_log_info(vchiq_arm_log_level,
			"found bulk_waiter %pK for pid %d", waiter,
			current->pid);
		userdata = &waiter->bulk_waiter;
	} else {
		/* Callback/no-callback modes pass the client's cookie through. */
		userdata = args->userdata;
	}

	status = vchiq_bulk_transfer(args->handle, NULL, args->data, args->size,
				     userdata, args->mode, dir);

	if (!waiter) {
		ret = 0;
		goto out;
	}

	if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
		!waiter->bulk_waiter.bulk) {
		/* Transfer finished (or cannot be resumed) - drop the waiter. */
		if (waiter->bulk_waiter.bulk) {
			/* Cancel the signal when the transfer completes. */
			spin_lock(&bulk_waiter_spinlock);
			waiter->bulk_waiter.bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		kfree(waiter);
		ret = 0;
	} else {
		/* Interrupted - park the waiter and tell userspace to retry. */
		const enum vchiq_bulk_mode mode_waiting =
			VCHIQ_BULK_MODE_WAITING;
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		vchiq_log_info(vchiq_arm_log_level,
			"saved bulk_waiter %pK for pid %d",
			waiter, current->pid);

		ret = put_user(mode_waiting, mode);
	}
out:
	vchiq_service_put(service);
	if (ret)
		return ret;
	else if (status == VCHIQ_ERROR)
		return -EIO;
	else if (status == VCHIQ_RETRY)
		return -EINTR;
	return 0;
}
1013 
1014 /* read a user pointer value from an array pointers in user space */
1015 static inline int vchiq_get_user_ptr(void __user **buf, void __user *ubuf, int index)
1016 {
1017 	int ret;
1018 
1019 	if (in_compat_syscall()) {
1020 		compat_uptr_t ptr32;
1021 		compat_uptr_t __user *uptr = ubuf;
1022 
1023 		ret = get_user(ptr32, uptr + index);
1024 		if (ret)
1025 			return ret;
1026 
1027 		*buf = compat_ptr(ptr32);
1028 	} else {
1029 		uintptr_t ptr, __user *uptr = ubuf;
1030 
1031 		ret = get_user(ptr, uptr + index);
1032 
1033 		if (ret)
1034 			return ret;
1035 
1036 		*buf = (void __user *)ptr;
1037 	}
1038 
1039 	return 0;
1040 }
1041 
/* 32-bit compat layout of struct vchiq_completion_data (pointers as u32). */
struct vchiq_completion_data32 {
	enum vchiq_reason reason;
	compat_uptr_t header;
	compat_uptr_t service_userdata;
	compat_uptr_t bulk_userdata;
};
1048 
1049 static int vchiq_put_completion(struct vchiq_completion_data __user *buf,
1050 				struct vchiq_completion_data *completion,
1051 				int index)
1052 {
1053 	struct vchiq_completion_data32 __user *buf32 = (void __user *)buf;
1054 
1055 	if (in_compat_syscall()) {
1056 		struct vchiq_completion_data32 tmp = {
1057 			.reason		  = completion->reason,
1058 			.header		  = ptr_to_compat(completion->header),
1059 			.service_userdata = ptr_to_compat(completion->service_userdata),
1060 			.bulk_userdata	  = ptr_to_compat(completion->bulk_userdata),
1061 		};
1062 		if (copy_to_user(&buf32[index], &tmp, sizeof(tmp)))
1063 			return -EFAULT;
1064 	} else {
1065 		if (copy_to_user(&buf[index], completion, sizeof(*completion)))
1066 			return -EFAULT;
1067 	}
1068 
1069 	return 0;
1070 }
1071 
/*
 * Handler for VCHIQ_IOC_AWAIT_COMPLETION.
 *
 * Blocks (interruptibly) until at least one completion record is
 * available or the instance is closing, then copies up to args->count
 * completions - along with any associated message payloads, via the
 * caller-supplied msgbufs array - out to user space.
 *
 * Returns the number of completions delivered, or a negative errno.
 * On any error path the remove_event is completed so the slot handler
 * is not left blocked on a full completion queue.
 */
static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
				      struct vchiq_await_completion *args,
				      int __user *msgbufcountp)
{
	int msgbufcount;
	int remove;
	int ret;

	DEBUG_INITIALISE(g_state.local)

	DEBUG_TRACE(AWAIT_COMPLETION_LINE);
	if (!instance->connected) {
		return -ENOTCONN;
	}

	mutex_lock(&instance->completion_mutex);

	DEBUG_TRACE(AWAIT_COMPLETION_LINE);
	/* Wait for a producer to insert a completion (or for shutdown). */
	while ((instance->completion_remove == instance->completion_insert)
		&& !instance->closing) {
		int rc;

		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
		/* Drop the mutex while sleeping so producers can proceed. */
		mutex_unlock(&instance->completion_mutex);
		rc = wait_for_completion_interruptible(
					&instance->insert_event);
		mutex_lock(&instance->completion_mutex);
		if (rc) {
			DEBUG_TRACE(AWAIT_COMPLETION_LINE);
			vchiq_log_info(vchiq_arm_log_level,
				"AWAIT_COMPLETION interrupted");
			ret = -EINTR;
			goto out;
		}
	}
	DEBUG_TRACE(AWAIT_COMPLETION_LINE);

	msgbufcount = args->msgbufcount;
	remove = instance->completion_remove;

	/* ret doubles as the count of completions copied out so far. */
	for (ret = 0; ret < args->count; ret++) {
		struct vchiq_completion_data_kernel *completion;
		struct vchiq_completion_data user_completion;
		struct vchiq_service *service;
		struct user_service *user_service;
		struct vchiq_header *header;

		if (remove == instance->completion_insert)
			break;

		completion = &instance->completions[
			remove & (MAX_COMPLETIONS - 1)];

		/*
		 * A read memory barrier is needed to stop
		 * prefetch of a stale completion record
		 */
		rmb();

		service = completion->service_userdata;
		user_service = service->base.userdata;

		memset(&user_completion, 0, sizeof(user_completion));
		user_completion = (struct vchiq_completion_data) {
			.reason = completion->reason,
			.service_userdata = user_service->userdata,
		};

		header = completion->header;
		if (header) {
			void __user *msgbuf;
			int msglen;

			msglen = header->size + sizeof(struct vchiq_header);
			/* This must be a VCHIQ-style service */
			if (args->msgbufsize < msglen) {
				vchiq_log_error(vchiq_arm_log_level,
					"header %pK: msgbufsize %x < msglen %x",
					header, args->msgbufsize, msglen);
				WARN(1, "invalid message size\n");
				/* Only report the error if nothing was delivered. */
				if (ret == 0)
					ret = -EMSGSIZE;
				break;
			}
			if (msgbufcount <= 0)
				/* Stall here for lack of a buffer for the message. */
				break;
			/* Get the pointer from user space */
			msgbufcount--;
			if (vchiq_get_user_ptr(&msgbuf, args->msgbufs,
						msgbufcount)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}

			/* Copy the message to user space */
			if (copy_to_user(msgbuf, header, msglen)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}

			/* Now it has been copied, the message can be released. */
			vchiq_release_message(service->handle, header);

			/* The completion must point to the msgbuf. */
			user_completion.header = msgbuf;
		}

		/* Drop the reference taken when the service was closed. */
		if ((completion->reason == VCHIQ_SERVICE_CLOSED) &&
		    !instance->use_close_delivered)
			vchiq_service_put(service);

		/*
		 * FIXME: address space mismatch, does bulk_userdata
		 * actually point to user or kernel memory?
		 */
		user_completion.bulk_userdata = completion->bulk_userdata;

		if (vchiq_put_completion(args->buf, &user_completion, ret)) {
			if (ret == 0)
				ret = -EFAULT;
			break;
		}

		/*
		 * Ensure that the above copy has completed
		 * before advancing the remove pointer.
		 */
		mb();
		remove++;
		instance->completion_remove = remove;
	}

	/* Report back how many message buffers remain unconsumed. */
	if (msgbufcount != args->msgbufcount) {
		if (put_user(msgbufcount, msgbufcountp))
			ret = -EFAULT;
	}
out:
	if (ret)
		complete(&instance->remove_event);
	mutex_unlock(&instance->completion_mutex);
	DEBUG_TRACE(AWAIT_COMPLETION_LINE);

	return ret;
}
1219 
/*
 * Native ioctl handler for /dev/vchiq.
 *
 * Dispatches on cmd. 'service' holds a reference obtained from
 * find_service_for_instance() (or its variants) which is released
 * after the switch. 'status' collects VCHIQ_* results from core calls
 * and is mapped to an errno at the end (VCHIQ_ERROR -> -EIO,
 * VCHIQ_RETRY -> -EINTR) when no errno was set directly in 'ret'.
 */
static long
vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct vchiq_instance *instance = file->private_data;
	enum vchiq_status status = VCHIQ_SUCCESS;
	struct vchiq_service *service = NULL;
	long ret = 0;
	int i, rc;

	vchiq_log_trace(vchiq_arm_log_level,
		"%s - instance %pK, cmd %s, arg %lx",
		__func__, instance,
		((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
		(_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
		ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);

	switch (cmd) {
	case VCHIQ_IOC_SHUTDOWN:
		if (!instance->connected)
			break;

		/* Remove all services */
		i = 0;
		while ((service = next_service_by_instance(instance->state,
			instance, &i))) {
			status = vchiq_remove_service(service->handle);
			vchiq_service_put(service);
			if (status != VCHIQ_SUCCESS)
				break;
		}
		service = NULL;

		if (status == VCHIQ_SUCCESS) {
			/* Wake the completion thread and ask it to exit */
			instance->closing = 1;
			complete(&instance->insert_event);
		}

		break;

	case VCHIQ_IOC_CONNECT:
		if (instance->connected) {
			ret = -EINVAL;
			break;
		}
		rc = mutex_lock_killable(&instance->state->mutex);
		if (rc) {
			vchiq_log_error(vchiq_arm_log_level,
				"vchiq: connect: could not lock mutex for state %d: %d",
				instance->state->id, rc);
			ret = -EINTR;
			break;
		}
		status = vchiq_connect_internal(instance->state, instance);
		mutex_unlock(&instance->state->mutex);

		if (status == VCHIQ_SUCCESS)
			instance->connected = 1;
		else
			vchiq_log_error(vchiq_arm_log_level,
				"vchiq: could not connect: %d", status);
		break;

	case VCHIQ_IOC_CREATE_SERVICE: {
		struct vchiq_create_service __user *argp;
		struct vchiq_create_service args;

		argp = (void __user *)arg;
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_ioc_create_service(instance, &args);
		if (ret < 0)
			break;

		/* Hand the new handle back; undo the create on failure. */
		if (put_user(args.handle, &argp->handle)) {
			vchiq_remove_service(args.handle);
			ret = -EFAULT;
		}
	} break;

	case VCHIQ_IOC_CLOSE_SERVICE:
	case VCHIQ_IOC_REMOVE_SERVICE: {
		unsigned int handle = (unsigned int)arg;
		struct user_service *user_service;

		service = find_service_for_instance(instance, handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		user_service = service->base.userdata;

		/*
		 * close_pending is false on first entry, and when the
		 * wait in vchiq_close_service has been interrupted.
		 */
		if (!user_service->close_pending) {
			status = (cmd == VCHIQ_IOC_CLOSE_SERVICE) ?
				 vchiq_close_service(service->handle) :
				 vchiq_remove_service(service->handle);
			if (status != VCHIQ_SUCCESS)
				break;
		}

		/*
		 * close_pending is true once the underlying service
		 * has been closed until the client library calls the
		 * CLOSE_DELIVERED ioctl, signalling close_event.
		 */
		if (user_service->close_pending &&
			wait_for_completion_interruptible(
				&user_service->close_event))
			status = VCHIQ_RETRY;
		break;
	}

	case VCHIQ_IOC_USE_SERVICE:
	case VCHIQ_IOC_RELEASE_SERVICE:	{
		unsigned int handle = (unsigned int)arg;

		service = find_service_for_instance(instance, handle);
		if (service) {
			ret = (cmd == VCHIQ_IOC_USE_SERVICE) ?
				vchiq_use_service_internal(service) :
				vchiq_release_service_internal(service);
			if (ret) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s: cmd %s returned error %ld for service %c%c%c%c:%03d",
					__func__,
					(cmd == VCHIQ_IOC_USE_SERVICE) ?
						"VCHIQ_IOC_USE_SERVICE" :
						"VCHIQ_IOC_RELEASE_SERVICE",
					ret,
					VCHIQ_FOURCC_AS_4CHARS(
						service->base.fourcc),
					service->client_id);
			}
		} else {
			ret = -EINVAL;
		}
	} break;

	case VCHIQ_IOC_QUEUE_MESSAGE: {
		struct vchiq_queue_message args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);

		if (service && (args.count <= MAX_ELEMENTS)) {
			/* Copy elements into kernel space */
			struct vchiq_element elements[MAX_ELEMENTS];

			if (copy_from_user(elements, args.elements,
				args.count * sizeof(struct vchiq_element)) == 0)
				ret = vchiq_ioc_queue_message(args.handle, elements,
							      args.count);
			else
				ret = -EFAULT;
		} else {
			ret = -EINVAL;
		}
	} break;

	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
	case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
		struct vchiq_queue_bulk_transfer args;
		struct vchiq_queue_bulk_transfer __user *argp;

		enum vchiq_bulk_dir dir =
			(cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
			VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;

		argp = (void __user *)arg;
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_irq_queue_bulk_tx_rx(instance, &args,
						 dir, &argp->mode);
	} break;

	case VCHIQ_IOC_AWAIT_COMPLETION: {
		struct vchiq_await_completion args;
		struct vchiq_await_completion __user *argp;

		argp = (void __user *)arg;
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_ioc_await_completion(instance, &args,
						 &argp->msgbufcount);
	} break;

	case VCHIQ_IOC_DEQUEUE_MESSAGE: {
		struct vchiq_dequeue_message args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_ioc_dequeue_message(instance, &args);
	} break;

	case VCHIQ_IOC_GET_CLIENT_ID: {
		unsigned int handle = (unsigned int)arg;

		ret = vchiq_get_client_id(handle);
	} break;

	case VCHIQ_IOC_GET_CONFIG: {
		struct vchiq_get_config args;
		struct vchiq_config config;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}
		/* Only copy out as much as the caller asked for. */
		if (args.config_size > sizeof(config)) {
			ret = -EINVAL;
			break;
		}

		vchiq_get_config(&config);
		if (copy_to_user(args.pconfig, &config, args.config_size)) {
			ret = -EFAULT;
			break;
		}
	} break;

	case VCHIQ_IOC_SET_SERVICE_OPTION: {
		struct vchiq_set_service_option args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		ret = vchiq_set_service_option(args.handle, args.option,
					       args.value);
	} break;

	case VCHIQ_IOC_LIB_VERSION: {
		unsigned int lib_version = (unsigned int)arg;

		if (lib_version < VCHIQ_VERSION_MIN)
			ret = -EINVAL;
		else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
			instance->use_close_delivered = 1;
	} break;

	case VCHIQ_IOC_CLOSE_DELIVERED: {
		unsigned int handle = (unsigned int)arg;

		service = find_closed_service_for_instance(instance, handle);
		if (service) {
			struct user_service *user_service =
				(struct user_service *)service->base.userdata;
			close_delivered(user_service);
		} else {
			ret = -EINVAL;
		}
	} break;

	default:
		ret = -ENOTTY;
		break;
	}

	/* Drop the reference taken by find_service_for_instance() above. */
	if (service)
		vchiq_service_put(service);

	/* Map a VCHIQ_* status to an errno unless one was set already. */
	if (ret == 0) {
		if (status == VCHIQ_ERROR)
			ret = -EIO;
		else if (status == VCHIQ_RETRY)
			ret = -EINTR;
	}

	if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
		(ret != -EWOULDBLOCK))
		vchiq_log_info(vchiq_arm_log_level,
			"  ioctl instance %pK, cmd %s -> status %d, %ld",
			instance,
			(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
				ioctl_names[_IOC_NR(cmd)] :
				"<invalid>",
			status, ret);
	else
		vchiq_log_trace(vchiq_arm_log_level,
			"  ioctl instance %pK, cmd %s -> status %d, %ld",
			instance,
			(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
				ioctl_names[_IOC_NR(cmd)] :
				"<invalid>",
			status, ret);

	return ret;
}
1540 
1541 #if defined(CONFIG_COMPAT)
1542 
/* 32-bit layout of struct vchiq_service_params for compat callers. */
struct vchiq_service_params32 {
	int fourcc;
	compat_uptr_t callback;
	compat_uptr_t userdata;
	short version; /* Increment for non-trivial changes */
	short version_min; /* Update for incompatible changes */
};

/* 32-bit layout of struct vchiq_create_service for compat callers. */
struct vchiq_create_service32 {
	struct vchiq_service_params32 params;
	int is_open;
	int is_vchi;
	unsigned int handle; /* OUT */
};

/* Compat counterpart of VCHIQ_IOC_CREATE_SERVICE (same ioctl number). */
#define VCHIQ_IOC_CREATE_SERVICE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service32)
1560 
1561 static long
1562 vchiq_compat_ioctl_create_service(struct file *file, unsigned int cmd,
1563 				  struct vchiq_create_service32 __user *ptrargs32)
1564 {
1565 	struct vchiq_create_service args;
1566 	struct vchiq_create_service32 args32;
1567 	long ret;
1568 
1569 	if (copy_from_user(&args32, ptrargs32, sizeof(args32)))
1570 		return -EFAULT;
1571 
1572 	args = (struct vchiq_create_service) {
1573 		.params = {
1574 			.fourcc	     = args32.params.fourcc,
1575 			.callback    = compat_ptr(args32.params.callback),
1576 			.userdata    = compat_ptr(args32.params.userdata),
1577 			.version     = args32.params.version,
1578 			.version_min = args32.params.version_min,
1579 		},
1580 		.is_open = args32.is_open,
1581 		.is_vchi = args32.is_vchi,
1582 		.handle  = args32.handle,
1583 	};
1584 
1585 	ret = vchiq_ioc_create_service(file->private_data, &args);
1586 	if (ret < 0)
1587 		return ret;
1588 
1589 	if (put_user(args.handle, &ptrargs32->handle)) {
1590 		vchiq_remove_service(args.handle);
1591 		return -EFAULT;
1592 	}
1593 
1594 	return 0;
1595 }
1596 
/* 32-bit layout of struct vchiq_element for compat callers. */
struct vchiq_element32 {
	compat_uptr_t data;
	unsigned int size;
};

/* 32-bit layout of struct vchiq_queue_message for compat callers. */
struct vchiq_queue_message32 {
	unsigned int handle;
	unsigned int count;
	compat_uptr_t elements;
};

/* Compat counterpart of VCHIQ_IOC_QUEUE_MESSAGE (same ioctl number). */
#define VCHIQ_IOC_QUEUE_MESSAGE32 \
	_IOW(VCHIQ_IOC_MAGIC,  4, struct vchiq_queue_message32)
1610 
1611 static long
1612 vchiq_compat_ioctl_queue_message(struct file *file,
1613 				 unsigned int cmd,
1614 				 struct vchiq_queue_message32 __user *arg)
1615 {
1616 	struct vchiq_queue_message args;
1617 	struct vchiq_queue_message32 args32;
1618 	struct vchiq_service *service;
1619 	int ret;
1620 
1621 	if (copy_from_user(&args32, arg, sizeof(args32)))
1622 		return -EFAULT;
1623 
1624 	args = (struct vchiq_queue_message) {
1625 		.handle   = args32.handle,
1626 		.count    = args32.count,
1627 		.elements = compat_ptr(args32.elements),
1628 	};
1629 
1630 	if (args32.count > MAX_ELEMENTS)
1631 		return -EINVAL;
1632 
1633 	service = find_service_for_instance(file->private_data, args.handle);
1634 	if (!service)
1635 		return -EINVAL;
1636 
1637 	if (args32.elements && args32.count) {
1638 		struct vchiq_element32 element32[MAX_ELEMENTS];
1639 		struct vchiq_element elements[MAX_ELEMENTS];
1640 		unsigned int count;
1641 
1642 		if (copy_from_user(&element32, args.elements,
1643 				   sizeof(element32))) {
1644 			vchiq_service_put(service);
1645 			return -EFAULT;
1646 		}
1647 
1648 		for (count = 0; count < args32.count; count++) {
1649 			elements[count].data =
1650 				compat_ptr(element32[count].data);
1651 			elements[count].size = element32[count].size;
1652 		}
1653 		ret = vchiq_ioc_queue_message(args.handle, elements,
1654 					      args.count);
1655 	} else {
1656 		ret = -EINVAL;
1657 	}
1658 	vchiq_service_put(service);
1659 
1660 	return ret;
1661 }
1662 
/* 32-bit layout of struct vchiq_queue_bulk_transfer for compat callers. */
struct vchiq_queue_bulk_transfer32 {
	unsigned int handle;
	compat_uptr_t data;
	unsigned int size;
	compat_uptr_t userdata;
	enum vchiq_bulk_mode mode;
};

/* Compat counterparts of the QUEUE_BULK_* ioctls (same ioctl numbers). */
#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT32 \
	_IOWR(VCHIQ_IOC_MAGIC, 5, struct vchiq_queue_bulk_transfer32)
#define VCHIQ_IOC_QUEUE_BULK_RECEIVE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 6, struct vchiq_queue_bulk_transfer32)
1675 
1676 static long
1677 vchiq_compat_ioctl_queue_bulk(struct file *file,
1678 			      unsigned int cmd,
1679 			      struct vchiq_queue_bulk_transfer32 __user *argp)
1680 {
1681 	struct vchiq_queue_bulk_transfer32 args32;
1682 	struct vchiq_queue_bulk_transfer args;
1683 	enum vchiq_bulk_dir dir = (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT32) ?
1684 				  VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
1685 
1686 	if (copy_from_user(&args32, argp, sizeof(args32)))
1687 		return -EFAULT;
1688 
1689 	args = (struct vchiq_queue_bulk_transfer) {
1690 		.handle   = args32.handle,
1691 		.data	  = compat_ptr(args32.data),
1692 		.size	  = args32.size,
1693 		.userdata = compat_ptr(args32.userdata),
1694 		.mode	  = args32.mode,
1695 	};
1696 
1697 	return vchiq_irq_queue_bulk_tx_rx(file->private_data, &args,
1698 					  dir, &argp->mode);
1699 }
1700 
/* 32-bit layout of struct vchiq_await_completion for compat callers. */
struct vchiq_await_completion32 {
	unsigned int count;
	compat_uptr_t buf;
	unsigned int msgbufsize;
	unsigned int msgbufcount; /* IN/OUT */
	compat_uptr_t msgbufs;
};

/* Compat counterpart of VCHIQ_IOC_AWAIT_COMPLETION (same ioctl number). */
#define VCHIQ_IOC_AWAIT_COMPLETION32 \
	_IOWR(VCHIQ_IOC_MAGIC, 7, struct vchiq_await_completion32)
1711 
1712 static long
1713 vchiq_compat_ioctl_await_completion(struct file *file,
1714 				    unsigned int cmd,
1715 				    struct vchiq_await_completion32 __user *argp)
1716 {
1717 	struct vchiq_await_completion args;
1718 	struct vchiq_await_completion32 args32;
1719 
1720 	if (copy_from_user(&args32, argp, sizeof(args32)))
1721 		return -EFAULT;
1722 
1723 	args = (struct vchiq_await_completion) {
1724 		.count		= args32.count,
1725 		.buf		= compat_ptr(args32.buf),
1726 		.msgbufsize	= args32.msgbufsize,
1727 		.msgbufcount	= args32.msgbufcount,
1728 		.msgbufs	= compat_ptr(args32.msgbufs),
1729 	};
1730 
1731 	return vchiq_ioc_await_completion(file->private_data, &args,
1732 					  &argp->msgbufcount);
1733 }
1734 
/* 32-bit layout of struct vchiq_dequeue_message for compat callers. */
struct vchiq_dequeue_message32 {
	unsigned int handle;
	int blocking;
	unsigned int bufsize;
	compat_uptr_t buf;
};

/* Compat counterpart of VCHIQ_IOC_DEQUEUE_MESSAGE (same ioctl number). */
#define VCHIQ_IOC_DEQUEUE_MESSAGE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 8, struct vchiq_dequeue_message32)
1744 
1745 static long
1746 vchiq_compat_ioctl_dequeue_message(struct file *file,
1747 				   unsigned int cmd,
1748 				   struct vchiq_dequeue_message32 __user *arg)
1749 {
1750 	struct vchiq_dequeue_message32 args32;
1751 	struct vchiq_dequeue_message args;
1752 
1753 	if (copy_from_user(&args32, arg, sizeof(args32)))
1754 		return -EFAULT;
1755 
1756 	args = (struct vchiq_dequeue_message) {
1757 		.handle		= args32.handle,
1758 		.blocking	= args32.blocking,
1759 		.bufsize	= args32.bufsize,
1760 		.buf		= compat_ptr(args32.buf),
1761 	};
1762 
1763 	return vchiq_ioc_dequeue_message(file->private_data, &args);
1764 }
1765 
/* 32-bit layout of struct vchiq_get_config for compat callers. */
struct vchiq_get_config32 {
	unsigned int config_size;
	compat_uptr_t pconfig;
};

/* Compat counterpart of VCHIQ_IOC_GET_CONFIG (same ioctl number). */
#define VCHIQ_IOC_GET_CONFIG32 \
	_IOWR(VCHIQ_IOC_MAGIC, 10, struct vchiq_get_config32)
1773 
1774 static long
1775 vchiq_compat_ioctl_get_config(struct file *file,
1776 			      unsigned int cmd,
1777 			      struct vchiq_get_config32 __user *arg)
1778 {
1779 	struct vchiq_get_config32 args32;
1780 	struct vchiq_config config;
1781 	void __user *ptr;
1782 
1783 	if (copy_from_user(&args32, arg, sizeof(args32)))
1784 		return -EFAULT;
1785 	if (args32.config_size > sizeof(config))
1786 		return -EINVAL;
1787 
1788 	vchiq_get_config(&config);
1789 	ptr = compat_ptr(args32.pconfig);
1790 	if (copy_to_user(ptr, &config, args32.config_size))
1791 		return -EFAULT;
1792 
1793 	return 0;
1794 }
1795 
/*
 * Compat ioctl entry point: route the 32-bit ioctl numbers to their
 * translating handlers; everything else shares the native handler
 * (those commands carry no pointers needing translation).
 */
static long
vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = compat_ptr(arg);

	switch (cmd) {
	case VCHIQ_IOC_CREATE_SERVICE32:
		return vchiq_compat_ioctl_create_service(file, cmd, argp);
	case VCHIQ_IOC_QUEUE_MESSAGE32:
		return vchiq_compat_ioctl_queue_message(file, cmd, argp);
	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT32:
	case VCHIQ_IOC_QUEUE_BULK_RECEIVE32:
		return vchiq_compat_ioctl_queue_bulk(file, cmd, argp);
	case VCHIQ_IOC_AWAIT_COMPLETION32:
		return vchiq_compat_ioctl_await_completion(file, cmd, argp);
	case VCHIQ_IOC_DEQUEUE_MESSAGE32:
		return vchiq_compat_ioctl_dequeue_message(file, cmd, argp);
	case VCHIQ_IOC_GET_CONFIG32:
		return vchiq_compat_ioctl_get_config(file, cmd, argp);
	default:
		return vchiq_ioctl(file, cmd, (unsigned long)argp);
	}
}
1819 
1820 #endif
1821 
1822 static int vchiq_open(struct inode *inode, struct file *file)
1823 {
1824 	struct vchiq_state *state = vchiq_get_state();
1825 	struct vchiq_instance *instance;
1826 
1827 	vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
1828 
1829 	if (!state) {
1830 		vchiq_log_error(vchiq_arm_log_level,
1831 				"vchiq has no connection to VideoCore");
1832 		return -ENOTCONN;
1833 	}
1834 
1835 	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1836 	if (!instance)
1837 		return -ENOMEM;
1838 
1839 	instance->state = state;
1840 	instance->pid = current->tgid;
1841 
1842 	vchiq_debugfs_add_instance(instance);
1843 
1844 	init_completion(&instance->insert_event);
1845 	init_completion(&instance->remove_event);
1846 	mutex_init(&instance->completion_mutex);
1847 	mutex_init(&instance->bulk_waiter_list_mutex);
1848 	INIT_LIST_HEAD(&instance->bulk_waiter_list);
1849 
1850 	file->private_data = instance;
1851 
1852 	return 0;
1853 }
1854 
/*
 * Release handler for /dev/vchiq: tear down everything the instance
 * owns - wake the completion reader, terminate and reap all of the
 * instance's services, drain queued messages and pending completions,
 * then free the instance itself.
 */
static int vchiq_release(struct inode *inode, struct file *file)
{
	struct vchiq_instance *instance = file->private_data;
	struct vchiq_state *state = vchiq_get_state();
	struct vchiq_service *service;
	int ret = 0;
	int i;

	vchiq_log_info(vchiq_arm_log_level, "%s: instance=%lx", __func__,
		       (unsigned long)instance);

	if (!state) {
		ret = -EPERM;
		goto out;
	}

	/* Ensure videocore is awake to allow termination. */
	vchiq_use_internal(instance->state, NULL, USE_TYPE_VCHIQ);

	mutex_lock(&instance->completion_mutex);

	/* Wake the completion thread and ask it to exit */
	instance->closing = 1;
	complete(&instance->insert_event);

	mutex_unlock(&instance->completion_mutex);

	/* Wake the slot handler if the completion queue is full. */
	complete(&instance->remove_event);

	/* Mark all services for termination... */
	i = 0;
	while ((service = next_service_by_instance(state, instance, &i))) {
		struct user_service *user_service = service->base.userdata;

		/* Wake the slot handler if the msg queue is full. */
		complete(&user_service->remove_event);

		vchiq_terminate_service_internal(service);
		vchiq_service_put(service);
	}

	/* ...and wait for them to die */
	i = 0;
	while ((service = next_service_by_instance(state, instance, &i))) {
		struct user_service *user_service = service->base.userdata;

		wait_for_completion(&service->remove_event);

		if (WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE)) {
			vchiq_service_put(service);
			break;
		}

		spin_lock(&msg_queue_spinlock);

		/* Release every message still queued on this service. */
		while (user_service->msg_remove != user_service->msg_insert) {
			struct vchiq_header *header;
			int m = user_service->msg_remove & (MSG_QUEUE_SIZE - 1);

			header = user_service->msg_queue[m];
			user_service->msg_remove++;
			/* Drop the spinlock around the (sleepable) release. */
			spin_unlock(&msg_queue_spinlock);

			if (header)
				vchiq_release_message(service->handle, header);
			spin_lock(&msg_queue_spinlock);
		}

		spin_unlock(&msg_queue_spinlock);

		vchiq_service_put(service);
	}

	/* Release any closed services */
	while (instance->completion_remove != instance->completion_insert) {
		struct vchiq_completion_data_kernel *completion;
		struct vchiq_service *service;

		completion = &instance->completions[
			instance->completion_remove & (MAX_COMPLETIONS - 1)];
		service = completion->service_userdata;
		if (completion->reason == VCHIQ_SERVICE_CLOSED) {
			struct user_service *user_service =
							service->base.userdata;

			/* Wake any blocked user-thread */
			if (instance->use_close_delivered)
				complete(&user_service->close_event);
			vchiq_service_put(service);
		}
		instance->completion_remove++;
	}

	/* Release the PEER service count. */
	vchiq_release_internal(instance->state, NULL);

	free_bulk_waiter(instance);

	vchiq_debugfs_remove_instance(instance);

	kfree(instance);
	file->private_data = NULL;

out:
	return ret;
}
1962 
/*
 * Append 'len' bytes of 'str' to a user-space dump buffer described by
 * dump_context, honouring the read offset (context->offset) and the
 * space remaining (context->space - context->actual).
 *
 * Returns 0 on success (including when output is skipped or truncated)
 * and -EFAULT if a copy to user space fails.
 */
int vchiq_dump(void *dump_context, const char *str, int len)
{
	struct dump_context *context = (struct dump_context *)dump_context;
	int copy_bytes;

	/* User buffer already full - nothing more can be emitted. */
	if (context->actual >= context->space)
		return 0;

	/* Consume any part of the dump the caller has already read. */
	if (context->offset > 0) {
		int skip_bytes = min_t(int, len, context->offset);

		str += skip_bytes;
		len -= skip_bytes;
		context->offset -= skip_bytes;
		if (context->offset > 0)
			return 0;
	}
	copy_bytes = min_t(int, len, context->space - context->actual);
	if (copy_bytes == 0)
		return 0;
	if (copy_to_user(context->buf + context->actual, str,
			 copy_bytes))
		return -EFAULT;
	context->actual += copy_bytes;
	len -= copy_bytes;

	/*
	 * If the terminating NUL is included in the length, then it
	 * marks the end of a line and should be replaced with a
	 * carriage return.
	 */
	if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
		char cr = '\n';

		/* Overwrite the NUL just copied (at actual - 1) in place. */
		if (copy_to_user(context->buf + context->actual - 1,
				 &cr, 1))
			return -EFAULT;
	}
	return 0;
}
2003 
2004 int vchiq_dump_platform_instances(void *dump_context)
2005 {
2006 	struct vchiq_state *state = vchiq_get_state();
2007 	char buf[80];
2008 	int len;
2009 	int i;
2010 
2011 	/*
2012 	 * There is no list of instances, so instead scan all services,
2013 	 * marking those that have been dumped.
2014 	 */
2015 
2016 	rcu_read_lock();
2017 	for (i = 0; i < state->unused_service; i++) {
2018 		struct vchiq_service *service;
2019 		struct vchiq_instance *instance;
2020 
2021 		service = rcu_dereference(state->services[i]);
2022 		if (!service || service->base.callback != service_callback)
2023 			continue;
2024 
2025 		instance = service->instance;
2026 		if (instance)
2027 			instance->mark = 0;
2028 	}
2029 	rcu_read_unlock();
2030 
2031 	for (i = 0; i < state->unused_service; i++) {
2032 		struct vchiq_service *service;
2033 		struct vchiq_instance *instance;
2034 		int err;
2035 
2036 		rcu_read_lock();
2037 		service = rcu_dereference(state->services[i]);
2038 		if (!service || service->base.callback != service_callback) {
2039 			rcu_read_unlock();
2040 			continue;
2041 		}
2042 
2043 		instance = service->instance;
2044 		if (!instance || instance->mark) {
2045 			rcu_read_unlock();
2046 			continue;
2047 		}
2048 		rcu_read_unlock();
2049 
2050 		len = snprintf(buf, sizeof(buf),
2051 			       "Instance %pK: pid %d,%s completions %d/%d",
2052 			       instance, instance->pid,
2053 			       instance->connected ? " connected, " :
2054 			       "",
2055 			       instance->completion_insert -
2056 			       instance->completion_remove,
2057 			       MAX_COMPLETIONS);
2058 		err = vchiq_dump(dump_context, buf, len + 1);
2059 		if (err)
2060 			return err;
2061 		instance->mark = 1;
2062 	}
2063 	return 0;
2064 }
2065 
/*
 * Dump a one-line summary of a single service's user-side state
 * (owning instance, and for VCHI services the message-queue fill
 * level and whether a dequeue is pending).
 *
 * Returns 0 on success or the error from vchiq_dump().
 */
int vchiq_dump_platform_service_state(void *dump_context,
				      struct vchiq_service *service)
{
	struct user_service *user_service =
			(struct user_service *)service->base.userdata;
	char buf[80];
	int len;

	len = scnprintf(buf, sizeof(buf), "  instance %pK", service->instance);

	if ((service->base.callback == service_callback) &&
		user_service->is_vchi) {
		len += scnprintf(buf + len, sizeof(buf) - len,
			", %d/%d messages",
			user_service->msg_insert - user_service->msg_remove,
			MSG_QUEUE_SIZE);

		if (user_service->dequeue_pending)
			len += scnprintf(buf + len, sizeof(buf) - len,
				" (dequeue pending)");
	}

	/* len + 1 includes the NUL so vchiq_dump() ends the line. */
	return vchiq_dump(dump_context, buf, len + 1);
}
2090 
2091 static ssize_t
2092 vchiq_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
2093 {
2094 	struct dump_context context;
2095 	int err;
2096 
2097 	context.buf = buf;
2098 	context.actual = 0;
2099 	context.space = count;
2100 	context.offset = *ppos;
2101 
2102 	err = vchiq_dump_state(&context, &g_state);
2103 	if (err)
2104 		return err;
2105 
2106 	*ppos += context.actual;
2107 
2108 	return context.actual;
2109 }
2110 
2111 struct vchiq_state *
2112 vchiq_get_state(void)
2113 {
2114 
2115 	if (!g_state.remote)
2116 		pr_err("%s: g_state.remote == NULL\n", __func__);
2117 	else if (g_state.remote->initialised != 1)
2118 		pr_notice("%s: g_state.remote->initialised != 1 (%d)\n",
2119 			  __func__, g_state.remote->initialised);
2120 
2121 	return (g_state.remote &&
2122 		(g_state.remote->initialised == 1)) ? &g_state : NULL;
2123 }
2124 
/* File operations for the /dev/vchiq character device. */
static const struct file_operations
vchiq_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = vchiq_ioctl,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vchiq_compat_ioctl,
#endif
	.open = vchiq_open,
	.release = vchiq_release,
	.read = vchiq_read
};
2136 
2137 /*
2138  * Autosuspend related functionality
2139  */
2140 
2141 static enum vchiq_status
2142 vchiq_keepalive_vchiq_callback(enum vchiq_reason reason,
2143 			       struct vchiq_header *header,
2144 			       unsigned int service_user, void *bulk_user)
2145 {
2146 	vchiq_log_error(vchiq_susp_log_level,
2147 		"%s callback reason %d", __func__, reason);
2148 	return 0;
2149 }
2150 
/*
 * Keep-alive worker thread: opens a private "KEEP" service, then loops
 * forever converting use/release counts posted through
 * arm_state->ka_evt into vchiq_use_service()/vchiq_release_service()
 * calls made in process context.
 */
static int
vchiq_keepalive_thread_func(void *v)
{
	struct vchiq_state *state = (struct vchiq_state *)v;
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	enum vchiq_status status;
	struct vchiq_instance *instance;
	unsigned int ka_handle;
	int ret;

	struct vchiq_service_params_kernel params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
		.callback    = vchiq_keepalive_vchiq_callback,
		.version     = KEEPALIVE_VER,
		.version_min = KEEPALIVE_VER_MIN
	};

	ret = vchiq_initialise(&instance);
	if (ret) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_initialise failed %d", __func__, ret);
		goto exit;
	}

	status = vchiq_connect(instance);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_connect failed %d", __func__, status);
		goto shutdown;
	}

	status = vchiq_add_service(instance, &params, &ka_handle);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_open_service failed %d", __func__, status);
		goto shutdown;
	}

	while (1) {
		long rc = 0, uc = 0;

		/* A signal just restarts the wait after flushing it. */
		if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
			vchiq_log_error(vchiq_susp_log_level,
				"%s interrupted", __func__);
			flush_signals(current);
			continue;
		}

		/*
		 * read and clear counters.  Do release_count then use_count to
		 * prevent getting more releases than uses
		 */
		rc = atomic_xchg(&arm_state->ka_release_count, 0);
		uc = atomic_xchg(&arm_state->ka_use_count, 0);

		/*
		 * Call use/release service the requisite number of times.
		 * Process use before release so use counts don't go negative
		 */
		while (uc--) {
			atomic_inc(&arm_state->ka_use_ack_count);
			status = vchiq_use_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s vchiq_use_service error %d",
					__func__, status);
			}
		}
		while (rc--) {
			status = vchiq_release_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s vchiq_release_service error %d",
					__func__, status);
			}
		}
	}

shutdown:
	vchiq_shutdown(instance);
exit:
	return 0;
}
2235 
2236 void
2237 vchiq_arm_init_state(struct vchiq_state *state,
2238 		     struct vchiq_arm_state *arm_state)
2239 {
2240 	if (arm_state) {
2241 		rwlock_init(&arm_state->susp_res_lock);
2242 
2243 		init_completion(&arm_state->ka_evt);
2244 		atomic_set(&arm_state->ka_use_count, 0);
2245 		atomic_set(&arm_state->ka_use_ack_count, 0);
2246 		atomic_set(&arm_state->ka_release_count, 0);
2247 
2248 		arm_state->state = state;
2249 		arm_state->first_connect = 0;
2250 
2251 	}
2252 }
2253 
2254 int
2255 vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
2256 		   enum USE_TYPE_E use_type)
2257 {
2258 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2259 	int ret = 0;
2260 	char entity[16];
2261 	int *entity_uc;
2262 	int local_uc;
2263 
2264 	if (!arm_state) {
2265 		ret = -EINVAL;
2266 		goto out;
2267 	}
2268 
2269 	if (use_type == USE_TYPE_VCHIQ) {
2270 		sprintf(entity, "VCHIQ:   ");
2271 		entity_uc = &arm_state->peer_use_count;
2272 	} else if (service) {
2273 		sprintf(entity, "%c%c%c%c:%03d",
2274 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2275 			service->client_id);
2276 		entity_uc = &service->service_use_count;
2277 	} else {
2278 		vchiq_log_error(vchiq_susp_log_level, "%s null service ptr", __func__);
2279 		ret = -EINVAL;
2280 		goto out;
2281 	}
2282 
2283 	write_lock_bh(&arm_state->susp_res_lock);
2284 	local_uc = ++arm_state->videocore_use_count;
2285 	++(*entity_uc);
2286 
2287 	vchiq_log_trace(vchiq_susp_log_level,
2288 		"%s %s count %d, state count %d",
2289 		__func__, entity, *entity_uc, local_uc);
2290 
2291 	write_unlock_bh(&arm_state->susp_res_lock);
2292 
2293 	if (!ret) {
2294 		enum vchiq_status status = VCHIQ_SUCCESS;
2295 		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
2296 
2297 		while (ack_cnt && (status == VCHIQ_SUCCESS)) {
2298 			/* Send the use notify to videocore */
2299 			status = vchiq_send_remote_use_active(state);
2300 			if (status == VCHIQ_SUCCESS)
2301 				ack_cnt--;
2302 			else
2303 				atomic_add(ack_cnt,
2304 					&arm_state->ka_use_ack_count);
2305 		}
2306 	}
2307 
2308 out:
2309 	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2310 	return ret;
2311 }
2312 
2313 int
2314 vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
2315 {
2316 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2317 	int ret = 0;
2318 	char entity[16];
2319 	int *entity_uc;
2320 
2321 	if (!arm_state) {
2322 		ret = -EINVAL;
2323 		goto out;
2324 	}
2325 
2326 	if (service) {
2327 		sprintf(entity, "%c%c%c%c:%03d",
2328 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2329 			service->client_id);
2330 		entity_uc = &service->service_use_count;
2331 	} else {
2332 		sprintf(entity, "PEER:   ");
2333 		entity_uc = &arm_state->peer_use_count;
2334 	}
2335 
2336 	write_lock_bh(&arm_state->susp_res_lock);
2337 	if (!arm_state->videocore_use_count || !(*entity_uc)) {
2338 		/* Don't use BUG_ON - don't allow user thread to crash kernel */
2339 		WARN_ON(!arm_state->videocore_use_count);
2340 		WARN_ON(!(*entity_uc));
2341 		ret = -EINVAL;
2342 		goto unlock;
2343 	}
2344 	--arm_state->videocore_use_count;
2345 	--(*entity_uc);
2346 
2347 	vchiq_log_trace(vchiq_susp_log_level,
2348 		"%s %s count %d, state count %d",
2349 		__func__, entity, *entity_uc,
2350 		arm_state->videocore_use_count);
2351 
2352 unlock:
2353 	write_unlock_bh(&arm_state->susp_res_lock);
2354 
2355 out:
2356 	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2357 	return ret;
2358 }
2359 
2360 void
2361 vchiq_on_remote_use(struct vchiq_state *state)
2362 {
2363 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2364 
2365 	atomic_inc(&arm_state->ka_use_count);
2366 	complete(&arm_state->ka_evt);
2367 }
2368 
2369 void
2370 vchiq_on_remote_release(struct vchiq_state *state)
2371 {
2372 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2373 
2374 	atomic_inc(&arm_state->ka_release_count);
2375 	complete(&arm_state->ka_evt);
2376 }
2377 
/* Convenience wrapper: take a use-count for a service on its own state. */
int
vchiq_use_service_internal(struct vchiq_service *service)
{
	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}
2383 
/* Convenience wrapper: drop a use-count for a service on its own state. */
int
vchiq_release_service_internal(struct vchiq_service *service)
{
	return vchiq_release_internal(service->state, service);
}
2389 
/* Accessor for the instance's embedded debugfs node. */
struct vchiq_debugfs_node *
vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
{
	return &instance->debugfs_node;
}
2395 
2396 int
2397 vchiq_instance_get_use_count(struct vchiq_instance *instance)
2398 {
2399 	struct vchiq_service *service;
2400 	int use_count = 0, i;
2401 
2402 	i = 0;
2403 	rcu_read_lock();
2404 	while ((service = __next_service_by_instance(instance->state,
2405 						     instance, &i)))
2406 		use_count += service->service_use_count;
2407 	rcu_read_unlock();
2408 	return use_count;
2409 }
2410 
/* Accessor for the PID recorded when the instance was opened. */
int
vchiq_instance_get_pid(struct vchiq_instance *instance)
{
	return instance->pid;
}
2416 
/* Accessor for the instance-wide trace flag. */
int
vchiq_instance_get_trace(struct vchiq_instance *instance)
{
	return instance->trace;
}
2422 
2423 void
2424 vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
2425 {
2426 	struct vchiq_service *service;
2427 	int i;
2428 
2429 	i = 0;
2430 	rcu_read_lock();
2431 	while ((service = __next_service_by_instance(instance->state,
2432 						     instance, &i)))
2433 		service->trace = trace;
2434 	rcu_read_unlock();
2435 	instance->trace = (trace != 0);
2436 }
2437 
2438 enum vchiq_status
2439 vchiq_use_service(unsigned int handle)
2440 {
2441 	enum vchiq_status ret = VCHIQ_ERROR;
2442 	struct vchiq_service *service = find_service_by_handle(handle);
2443 
2444 	if (service) {
2445 		ret = vchiq_use_internal(service->state, service,
2446 				USE_TYPE_SERVICE);
2447 		vchiq_service_put(service);
2448 	}
2449 	return ret;
2450 }
2451 EXPORT_SYMBOL(vchiq_use_service);
2452 
2453 enum vchiq_status
2454 vchiq_release_service(unsigned int handle)
2455 {
2456 	enum vchiq_status ret = VCHIQ_ERROR;
2457 	struct vchiq_service *service = find_service_by_handle(handle);
2458 
2459 	if (service) {
2460 		ret = vchiq_release_internal(service->state, service);
2461 		vchiq_service_put(service);
2462 	}
2463 	return ret;
2464 }
2465 EXPORT_SYMBOL(vchiq_release_service);
2466 
/* Snapshot of one service's identity and use-count, taken under the
 * susp_res_lock so it can be logged after the lock is dropped. */
struct service_data_struct {
	int fourcc;     /* service four-character code */
	int clientid;   /* client id of the service */
	int use_count;  /* service_use_count at snapshot time */
};
2472 
/*
 * Log the use-count of every active service, plus the peer and overall
 * VideoCore use-counts.  Snapshots are taken under susp_res_lock (counts)
 * and RCU (service table) into a temporary array, so the logging itself
 * happens with no locks held.
 */
void
vchiq_dump_service_use_state(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct service_data_struct *service_data;
	int i, found = 0;
	/*
	 * If there's more than 64 services, only dump ones with
	 * non-zero counts
	 */
	int only_nonzero = 0;
	static const char *nz = "<-- preventing suspend";

	int peer_count;
	int vc_use_count;
	int active_services;

	if (!arm_state)
		return;

	/* At most MAX_SERVICES entries are snapshotted. */
	service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
				     GFP_KERNEL);
	if (!service_data)
		return;

	read_lock_bh(&arm_state->susp_res_lock);
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > MAX_SERVICES)
		only_nonzero = 1;

	rcu_read_lock();
	for (i = 0; i < active_services; i++) {
		struct vchiq_service *service_ptr =
			rcu_dereference(state->services[i]);

		if (!service_ptr)
			continue;

		if (only_nonzero && !service_ptr->service_use_count)
			continue;

		/* Skip slots that are allocated but not in use. */
		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
			continue;

		service_data[found].fourcc = service_ptr->base.fourcc;
		service_data[found].clientid = service_ptr->client_id;
		service_data[found].use_count = service_ptr->service_use_count;
		found++;
		if (found >= MAX_SERVICES)
			break;
	}
	rcu_read_unlock();

	read_unlock_bh(&arm_state->susp_res_lock);

	/* Locks dropped - now log what was captured. */
	if (only_nonzero)
		vchiq_log_warning(vchiq_susp_log_level, "Too many active "
			"services (%d).  Only dumping up to first %d services "
			"with non-zero use-count", active_services, found);

	for (i = 0; i < found; i++) {
		vchiq_log_warning(vchiq_susp_log_level,
			"----- %c%c%c%c:%d service count %d %s",
			VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
			service_data[i].clientid,
			service_data[i].use_count,
			service_data[i].use_count ? nz : "");
	}
	vchiq_log_warning(vchiq_susp_log_level,
		"----- VCHIQ use count count %d", peer_count);
	vchiq_log_warning(vchiq_susp_log_level,
		"--- Overall vchiq instance use count %d", vc_use_count);

	kfree(service_data);
}
2550 
2551 enum vchiq_status
2552 vchiq_check_service(struct vchiq_service *service)
2553 {
2554 	struct vchiq_arm_state *arm_state;
2555 	enum vchiq_status ret = VCHIQ_ERROR;
2556 
2557 	if (!service || !service->state)
2558 		goto out;
2559 
2560 	arm_state = vchiq_platform_get_arm_state(service->state);
2561 
2562 	read_lock_bh(&arm_state->susp_res_lock);
2563 	if (service->service_use_count)
2564 		ret = VCHIQ_SUCCESS;
2565 	read_unlock_bh(&arm_state->susp_res_lock);
2566 
2567 	if (ret == VCHIQ_ERROR) {
2568 		vchiq_log_error(vchiq_susp_log_level,
2569 			"%s ERROR - %c%c%c%c:%d service count %d, state count %d", __func__,
2570 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2571 			service->client_id, service->service_use_count,
2572 			arm_state->videocore_use_count);
2573 		vchiq_dump_service_use_state(service->state);
2574 	}
2575 out:
2576 	return ret;
2577 }
2578 
/*
 * Connection-state hook.  On the first transition into CONNECTED, spawn
 * the keepalive thread (exactly once; first_connect is guarded by the
 * susp_res_lock).  Later transitions, or non-CONNECTED states, do
 * nothing beyond logging.
 */
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
				       enum vchiq_connstate oldstate,
				       enum vchiq_connstate newstate)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	char threadname[16];

	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
		get_conn_state_name(oldstate), get_conn_state_name(newstate));
	if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
		return;

	/* Only the first connect starts the keepalive thread. */
	write_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->first_connect) {
		write_unlock_bh(&arm_state->susp_res_lock);
		return;
	}

	arm_state->first_connect = 1;
	write_unlock_bh(&arm_state->susp_res_lock);
	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
		 state->id);
	/* Created stopped; woken below only if creation succeeded. */
	arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
					      (void *)state,
					      threadname);
	if (IS_ERR(arm_state->ka_thread)) {
		vchiq_log_error(vchiq_susp_log_level,
				"vchiq: FATAL: couldn't create thread %s",
				threadname);
	} else {
		wake_up_process(arm_state->ka_thread);
	}
}
2612 
/* Device-tree match table; .data selects the per-SoC drvdata
 * (bcm2835/bcm2836 variants are declared elsewhere). */
static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_drvdata },
	{ .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_drvdata },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);
2619 
2620 static struct platform_device *
2621 vchiq_register_child(struct platform_device *pdev, const char *name)
2622 {
2623 	struct platform_device_info pdevinfo;
2624 	struct platform_device *child;
2625 
2626 	memset(&pdevinfo, 0, sizeof(pdevinfo));
2627 
2628 	pdevinfo.parent = &pdev->dev;
2629 	pdevinfo.name = name;
2630 	pdevinfo.id = PLATFORM_DEVID_NONE;
2631 	pdevinfo.dma_mask = DMA_BIT_MASK(32);
2632 
2633 	child = platform_device_register_full(&pdevinfo);
2634 	if (IS_ERR(child)) {
2635 		dev_warn(&pdev->dev, "%s not registered\n", name);
2636 		child = NULL;
2637 	}
2638 
2639 	return child;
2640 }
2641 
/*
 * Platform probe: resolve per-SoC drvdata from the DT match, obtain the
 * firmware interface (deferring until it is available), initialise the
 * vchiq platform layer, then expose the /dev/vchiq character device and
 * register the camera/audio child devices.
 */
static int vchiq_probe(struct platform_device *pdev)
{
	struct device_node *fw_node;
	const struct of_device_id *of_id;
	struct vchiq_drvdata *drvdata;
	struct device *vchiq_dev;
	int err;

	/*
	 * NOTE(review): of_match_node() can return NULL in theory; probe
	 * is only reached via vchiq_of_match so of_id is assumed non-NULL
	 * here - confirm if this driver can bind another way.
	 */
	of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
	drvdata = (struct vchiq_drvdata *)of_id->data;
	if (!drvdata)
		return -EINVAL;

	fw_node = of_find_compatible_node(NULL, NULL,
					  "raspberrypi,bcm2835-firmware");
	if (!fw_node) {
		dev_err(&pdev->dev, "Missing firmware node\n");
		return -ENOENT;
	}

	/* NULL means the firmware driver has not probed yet - defer. */
	drvdata->fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
	of_node_put(fw_node);
	if (!drvdata->fw)
		return -EPROBE_DEFER;

	platform_set_drvdata(pdev, drvdata);

	err = vchiq_platform_init(pdev, &g_state);
	if (err)
		goto failed_platform_init;

	cdev_init(&vchiq_cdev, &vchiq_fops);
	vchiq_cdev.owner = THIS_MODULE;
	err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
	if (err) {
		vchiq_log_error(vchiq_arm_log_level,
			"Unable to register device");
		goto failed_platform_init;
	}

	vchiq_dev = device_create(vchiq_class, &pdev->dev, vchiq_devid, NULL,
				  "vchiq");
	if (IS_ERR(vchiq_dev)) {
		err = PTR_ERR(vchiq_dev);
		goto failed_device_create;
	}

	vchiq_debugfs_init();

	vchiq_log_info(vchiq_arm_log_level,
		"vchiq: initialised - version %d (min %d), device %d.%d",
		VCHIQ_VERSION, VCHIQ_VERSION_MIN,
		MAJOR(vchiq_devid), MINOR(vchiq_devid));

	/* Child registration failures are non-fatal (NULL is returned). */
	bcm2835_camera = vchiq_register_child(pdev, "bcm2835-camera");
	bcm2835_audio = vchiq_register_child(pdev, "bcm2835_audio");

	return 0;

failed_device_create:
	cdev_del(&vchiq_cdev);
failed_platform_init:
	vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
	return err;
}
2707 
/* Platform remove: undo vchiq_probe() in reverse order. */
static int vchiq_remove(struct platform_device *pdev)
{
	/* platform_device_unregister() tolerates NULL children. */
	platform_device_unregister(bcm2835_audio);
	platform_device_unregister(bcm2835_camera);
	vchiq_debugfs_deinit();
	device_destroy(vchiq_class, vchiq_devid);
	cdev_del(&vchiq_cdev);

	return 0;
}
2718 
/* Platform driver glue, bound via the vchiq_of_match table above. */
static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove = vchiq_remove,
};
2727 
/*
 * Module init: create the device class and char-dev region before
 * registering the platform driver (probe may run immediately and needs
 * both).  Uses goto-based unwinding on failure.
 */
static int __init vchiq_driver_init(void)
{
	int ret;

	vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
	if (IS_ERR(vchiq_class)) {
		pr_err("Failed to create vchiq class\n");
		return PTR_ERR(vchiq_class);
	}

	ret = alloc_chrdev_region(&vchiq_devid, 0, 1, DEVICE_NAME);
	if (ret) {
		pr_err("Failed to allocate vchiq's chrdev region\n");
		goto class_destroy;
	}

	ret = platform_driver_register(&vchiq_driver);
	if (ret) {
		pr_err("Failed to register vchiq driver\n");
		goto region_unregister;
	}

	return 0;

region_unregister:
	unregister_chrdev_region(vchiq_devid, 1);

class_destroy:
	class_destroy(vchiq_class);

	return ret;
}
module_init(vchiq_driver_init);
2761 
/* Module exit: tear down in the reverse order of vchiq_driver_init(). */
static void __exit vchiq_driver_exit(void)
{
	platform_driver_unregister(&vchiq_driver);
	unregister_chrdev_region(vchiq_devid, 1);
	class_destroy(vchiq_class);
}
module_exit(vchiq_driver_exit);
2769 
/* Module metadata reported via modinfo. */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");
2773