1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
4 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5 */
6
7 #include <linux/cdev.h>
8 #include <linux/fs.h>
9 #include <linux/device.h>
10 #include <linux/slab.h>
11 #include <linux/compat.h>
12 #include <linux/miscdevice.h>
13
14 #include "vchiq_core.h"
15 #include "vchiq_ioctl.h"
16 #include "vchiq_arm.h"
17 #include "vchiq_debugfs.h"
18
19 static const char *const ioctl_names[] = {
20 "CONNECT",
21 "SHUTDOWN",
22 "CREATE_SERVICE",
23 "REMOVE_SERVICE",
24 "QUEUE_MESSAGE",
25 "QUEUE_BULK_TRANSMIT",
26 "QUEUE_BULK_RECEIVE",
27 "AWAIT_COMPLETION",
28 "DEQUEUE_MESSAGE",
29 "GET_CLIENT_ID",
30 "GET_CONFIG",
31 "CLOSE_SERVICE",
32 "USE_SERVICE",
33 "RELEASE_SERVICE",
34 "SET_SERVICE_OPTION",
35 "DUMP_PHYS_MEM",
36 "LIB_VERSION",
37 "CLOSE_DELIVERED"
38 };
39
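/* Ensure the name table above stays in step with the ioctl numbering. */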
40 static_assert(ARRAY_SIZE(ioctl_names) == (VCHIQ_IOC_MAX + 1));
41
42 static void
43 user_service_free(void *userdata)
44 {
45 kfree(userdata);
46 }
47
48 static void close_delivered(struct user_service *user_service)
49 {
50 vchiq_log_info(vchiq_arm_log_level,
51 "%s(handle=%x)",
52 __func__, user_service->service->handle);
53
54 if (user_service->close_pending) {
55 /* Allow the underlying service to be culled */
56 vchiq_service_put(user_service->service);
57
58 /* Wake the user-thread blocked in close_ or remove_service */
59 complete(&user_service->close_event);
60
61 user_service->close_pending = 0;
62 }
63 }
64
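/*
 * Context for copying a user-supplied array of vchiq_elements into a
 * message slot.  The callback below is handed to vchiq_queue_message()
 * and invoked repeatedly as the message data is gathered;
 * element_offset and elements_to_go let it resume part-way through an
 * element on the next call.
 */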
65 struct vchiq_io_copy_callback_context {
66 struct vchiq_element *element;
67 size_t element_offset;
68 unsigned long elements_to_go;
69 };
70
71 static ssize_t vchiq_ioc_copy_element_data(void *context, void *dest,
72 size_t offset, size_t maxsize)
73 {
74 struct vchiq_io_copy_callback_context *cc = context;
75 size_t total_bytes_copied = 0;
76 size_t bytes_this_round;
77
78 while (total_bytes_copied < maxsize) {
79 if (!cc->elements_to_go)
80 return total_bytes_copied;
81
82 if (!cc->element->size) {
83 cc->elements_to_go--;
84 cc->element++;
85 cc->element_offset = 0;
86 continue;
87 }
88
89 bytes_this_round = min(cc->element->size - cc->element_offset,
90 maxsize - total_bytes_copied);
91
92 if (copy_from_user(dest + total_bytes_copied,
93 cc->element->data + cc->element_offset,
94 bytes_this_round))
95 return -EFAULT;
96
97 cc->element_offset += bytes_this_round;
98 total_bytes_copied += bytes_this_round;
99
100 if (cc->element_offset == cc->element->size) {
101 cc->elements_to_go--;
102 cc->element++;
103 cc->element_offset = 0;
104 }
105 }
106
107 return maxsize;
108 }
109
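/*
 * Validate the element array, add up the total payload size and hand
 * the copy callback to the core.  Core status codes are translated to
 * the errno values expected by user space (-EINVAL -> -EIO,
 * interrupted waits -> -EINTR).
 */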
110 static int
111 vchiq_ioc_queue_message(struct vchiq_instance *instance, unsigned int handle,
112 struct vchiq_element *elements, unsigned long count)
113 {
114 struct vchiq_io_copy_callback_context context;
115 int status = 0;
116 unsigned long i;
117 size_t total_size = 0;
118
119 context.element = elements;
120 context.element_offset = 0;
121 context.elements_to_go = count;
122
123 for (i = 0; i < count; i++) {
124 if (!elements[i].data && elements[i].size != 0)
125 return -EFAULT;
126
127 total_size += elements[i].size;
128 }
129
130 status = vchiq_queue_message(instance, handle, vchiq_ioc_copy_element_data,
131 &context, total_size);
132
133 if (status == -EINVAL)
134 return -EIO;
135 else if (status == -EAGAIN)
136 return -EINTR;
137 return 0;
138 }
139
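/*
 * Back-end for VCHIQ_IOC_CREATE_SERVICE: allocate the per-service
 * user_service bookkeeping, register the service with the core in the
 * appropriate initial state (OPENING when opening an existing remote
 * service, otherwise LISTENING or HIDDEN depending on whether the
 * instance is connected), and optionally open it on behalf of the
 * caller.
 */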
140 static int vchiq_ioc_create_service(struct vchiq_instance *instance,
141 struct vchiq_create_service *args)
142 {
143 struct user_service *user_service = NULL;
144 struct vchiq_service *service;
145 int status = 0;
146 struct vchiq_service_params_kernel params;
147 int srvstate;
148
149 if (args->is_open && !instance->connected)
150 return -ENOTCONN;
151
152 user_service = kmalloc(sizeof(*user_service), GFP_KERNEL);
153 if (!user_service)
154 return -ENOMEM;
155
156 if (args->is_open) {
157 srvstate = VCHIQ_SRVSTATE_OPENING;
158 } else {
159 srvstate = instance->connected ?
160 VCHIQ_SRVSTATE_LISTENING : VCHIQ_SRVSTATE_HIDDEN;
161 }
162
163 params = (struct vchiq_service_params_kernel) {
164 .fourcc = args->params.fourcc,
165 .callback = service_callback,
166 .userdata = user_service,
167 .version = args->params.version,
168 .version_min = args->params.version_min,
169 };
170 service = vchiq_add_service_internal(instance->state, &params,
171 srvstate, instance,
172 user_service_free);
173 if (!service) {
174 kfree(user_service);
175 return -EEXIST;
176 }
177
178 user_service->service = service;
179 user_service->userdata = args->params.userdata;
180 user_service->instance = instance;
181 user_service->is_vchi = (args->is_vchi != 0);
182 user_service->dequeue_pending = 0;
183 user_service->close_pending = 0;
184 user_service->message_available_pos = instance->completion_remove - 1;
185 user_service->msg_insert = 0;
186 user_service->msg_remove = 0;
187 init_completion(&user_service->insert_event);
188 init_completion(&user_service->remove_event);
189 init_completion(&user_service->close_event);
190
191 if (args->is_open) {
192 status = vchiq_open_service_internal(service, instance->pid);
193 if (status) {
194 vchiq_remove_service(instance, service->handle);
195 return (status == -EAGAIN) ?
196 -EINTR : -EIO;
197 }
198 }
199 args->handle = service->handle;
200
201 return 0;
202 }
203
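/*
 * Back-end for VCHIQ_IOC_DEQUEUE_MESSAGE (only valid for VCHI-style
 * services).  With args->blocking set it sleeps on insert_event until
 * a message arrives; otherwise an empty queue yields -EWOULDBLOCK.
 * On success the payload is copied to args->buf (when supplied and
 * large enough) and the message size is returned.
 */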
204 static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
205 struct vchiq_dequeue_message *args)
206 {
207 struct user_service *user_service;
208 struct vchiq_service *service;
209 struct vchiq_header *header;
210 int ret;
211
212 DEBUG_INITIALISE(g_state.local);
213 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
214 service = find_service_for_instance(instance, args->handle);
215 if (!service)
216 return -EINVAL;
217
218 user_service = (struct user_service *)service->base.userdata;
219 if (user_service->is_vchi == 0) {
220 ret = -EINVAL;
221 goto out;
222 }
223
224 spin_lock(&msg_queue_spinlock);
225 if (user_service->msg_remove == user_service->msg_insert) {
226 if (!args->blocking) {
227 spin_unlock(&msg_queue_spinlock);
228 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
229 ret = -EWOULDBLOCK;
230 goto out;
231 }
232 user_service->dequeue_pending = 1;
233 ret = 0;
234 do {
235 spin_unlock(&msg_queue_spinlock);
236 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
237 if (wait_for_completion_interruptible(&user_service->insert_event)) {
238 vchiq_log_info(vchiq_arm_log_level,
239 "DEQUEUE_MESSAGE interrupted");
240 ret = -EINTR;
241 break;
242 }
243 spin_lock(&msg_queue_spinlock);
244 } while (user_service->msg_remove == user_service->msg_insert);
245
246 if (ret)
247 goto out;
248 }
249
250 if (WARN_ON_ONCE((int)(user_service->msg_insert -
251 user_service->msg_remove) < 0)) {
252 spin_unlock(&msg_queue_spinlock);
253 ret = -EINVAL;
254 goto out;
255 }
256
257 header = user_service->msg_queue[user_service->msg_remove &
258 (MSG_QUEUE_SIZE - 1)];
259 user_service->msg_remove++;
260 spin_unlock(&msg_queue_spinlock);
261
262 complete(&user_service->remove_event);
263 if (!header) {
264 ret = -ENOTCONN;
265 } else if (header->size <= args->bufsize) {
266 /* Copy to user space if args->buf is not NULL */
267 if (!args->buf || (copy_to_user(args->buf, header->data, header->size) == 0)) {
268 ret = header->size;
269 vchiq_release_message(instance, service->handle, header);
270 } else {
271 ret = -EFAULT;
272 }
273 } else {
274 vchiq_log_error(vchiq_arm_log_level,
275 "header %pK: bufsize %x < size %x",
276 header, args->bufsize, header->size);
277 WARN(1, "invalid size\n");
278 ret = -EMSGSIZE;
279 }
280 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
281 out:
282 vchiq_service_put(service);
283 return ret;
284 }
285
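/*
 * Back-end for the bulk transmit/receive ioctls.  BLOCKING mode
 * allocates a bulk_waiter to sleep on; WAITING mode picks up a waiter
 * parked by an earlier, interrupted blocking transfer for this pid.
 * If a blocking transfer is interrupted before completion, the waiter
 * is saved on bulk_waiter_list and the mode reported back to user
 * space is rewritten to VCHIQ_BULK_MODE_WAITING so the client can
 * collect the result later.
 */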
286 static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
287 struct vchiq_queue_bulk_transfer *args,
288 enum vchiq_bulk_dir dir,
289 enum vchiq_bulk_mode __user *mode)
290 {
291 struct vchiq_service *service;
292 struct bulk_waiter_node *waiter = NULL, *iter;
293 void *userdata;
294 int status = 0;
295 int ret;
296
297 service = find_service_for_instance(instance, args->handle);
298 if (!service)
299 return -EINVAL;
300
301 if (args->mode == VCHIQ_BULK_MODE_BLOCKING) {
302 waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
303 if (!waiter) {
304 ret = -ENOMEM;
305 goto out;
306 }
307
308 userdata = &waiter->bulk_waiter;
309 } else if (args->mode == VCHIQ_BULK_MODE_WAITING) {
310 mutex_lock(&instance->bulk_waiter_list_mutex);
311 list_for_each_entry(iter, &instance->bulk_waiter_list,
312 list) {
313 if (iter->pid == current->pid) {
314 list_del(&iter->list);
315 waiter = iter;
316 break;
317 }
318 }
319 mutex_unlock(&instance->bulk_waiter_list_mutex);
320 if (!waiter) {
321 vchiq_log_error(vchiq_arm_log_level,
322 "no bulk_waiter found for pid %d", current->pid);
323 ret = -ESRCH;
324 goto out;
325 }
326 vchiq_log_info(vchiq_arm_log_level,
327 "found bulk_waiter %pK for pid %d", waiter, current->pid);
328 userdata = &waiter->bulk_waiter;
329 } else {
330 userdata = args->userdata;
331 }
332
333 status = vchiq_bulk_transfer(instance, args->handle, NULL, args->data, args->size,
334 userdata, args->mode, dir);
335
336 if (!waiter) {
337 ret = 0;
338 goto out;
339 }
340
341 if ((status != -EAGAIN) || fatal_signal_pending(current) ||
342 !waiter->bulk_waiter.bulk) {
343 if (waiter->bulk_waiter.bulk) {
344 /* Cancel the signal when the transfer completes. */
345 spin_lock(&bulk_waiter_spinlock);
346 waiter->bulk_waiter.bulk->userdata = NULL;
347 spin_unlock(&bulk_waiter_spinlock);
348 }
349 kfree(waiter);
350 ret = 0;
351 } else {
352 const enum vchiq_bulk_mode mode_waiting =
353 VCHIQ_BULK_MODE_WAITING;
354 waiter->pid = current->pid;
355 mutex_lock(&instance->bulk_waiter_list_mutex);
356 list_add(&waiter->list, &instance->bulk_waiter_list);
357 mutex_unlock(&instance->bulk_waiter_list_mutex);
358 vchiq_log_info(vchiq_arm_log_level,
359 "saved bulk_waiter %pK for pid %d", waiter, current->pid);
360
361 ret = put_user(mode_waiting, mode);
362 }
363 out:
364 vchiq_service_put(service);
365 if (ret)
366 return ret;
367 else if (status == -EINVAL)
368 return -EIO;
369 else if (status == -EAGAIN)
370 return -EINTR;
371 return 0;
372 }
373
374 /* read a user pointer value from an array of pointers in user space */
375 static inline int vchiq_get_user_ptr(void __user **buf, void __user *ubuf, int index)
376 {
377 int ret;
378
379 if (in_compat_syscall()) {
380 compat_uptr_t ptr32;
381 compat_uptr_t __user *uptr = ubuf;
382
383 ret = get_user(ptr32, uptr + index);
384 if (ret)
385 return ret;
386
387 *buf = compat_ptr(ptr32);
388 } else {
389 uintptr_t ptr, __user *uptr = ubuf;
390
391 ret = get_user(ptr, uptr + index);
392
393 if (ret)
394 return ret;
395
396 *buf = (void __user *)ptr;
397 }
398
399 return 0;
400 }
401
402 struct vchiq_completion_data32 {
403 enum vchiq_reason reason;
404 compat_uptr_t header;
405 compat_uptr_t service_userdata;
406 compat_uptr_t bulk_userdata;
407 };
408
409 static int vchiq_put_completion(struct vchiq_completion_data __user *buf,
410 struct vchiq_completion_data *completion,
411 int index)
412 {
413 struct vchiq_completion_data32 __user *buf32 = (void __user *)buf;
414
415 if (in_compat_syscall()) {
416 struct vchiq_completion_data32 tmp = {
417 .reason = completion->reason,
418 .header = ptr_to_compat(completion->header),
419 .service_userdata = ptr_to_compat(completion->service_userdata),
420 .bulk_userdata = ptr_to_compat(completion->bulk_userdata),
421 };
422 if (copy_to_user(&buf32[index], &tmp, sizeof(tmp)))
423 return -EFAULT;
424 } else {
425 if (copy_to_user(&buf[index], completion, sizeof(*completion)))
426 return -EFAULT;
427 }
428
429 return 0;
430 }
431
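/*
 * Back-end for VCHIQ_IOC_AWAIT_COMPLETION.  Sleeps until at least one
 * completion record is queued (or the instance is closing), then
 * copies up to args->count records to user space, placing any message
 * headers in the caller-supplied msgbufs and reporting how many of
 * those buffers remain via msgbufcountp.  Returns the number of
 * completions delivered, or a negative errno.
 */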
432 static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
433 struct vchiq_await_completion *args,
434 int __user *msgbufcountp)
435 {
436 int msgbufcount;
437 int remove;
438 int ret;
439
440 DEBUG_INITIALISE(g_state.local);
441
442 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
443 if (!instance->connected)
444 return -ENOTCONN;
445
446 mutex_lock(&instance->completion_mutex);
447
448 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
449 while ((instance->completion_remove == instance->completion_insert) && !instance->closing) {
450 int rc;
451
452 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
453 mutex_unlock(&instance->completion_mutex);
454 rc = wait_for_completion_interruptible(&instance->insert_event);
455 mutex_lock(&instance->completion_mutex);
456 if (rc) {
457 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
458 vchiq_log_info(vchiq_arm_log_level,
459 "AWAIT_COMPLETION interrupted");
460 ret = -EINTR;
461 goto out;
462 }
463 }
464 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
465
466 msgbufcount = args->msgbufcount;
467 remove = instance->completion_remove;
468
469 for (ret = 0; ret < args->count; ret++) {
470 struct vchiq_completion_data_kernel *completion;
471 struct vchiq_completion_data user_completion;
472 struct vchiq_service *service;
473 struct user_service *user_service;
474 struct vchiq_header *header;
475
476 if (remove == instance->completion_insert)
477 break;
478
479 completion = &instance->completions[remove & (MAX_COMPLETIONS - 1)];
480
481 /*
482 * A read memory barrier is needed to stop
483 * prefetch of a stale completion record
484 */
485 rmb();
486
487 service = completion->service_userdata;
488 user_service = service->base.userdata;
489
490 memset(&user_completion, 0, sizeof(user_completion));
491 user_completion = (struct vchiq_completion_data) {
492 .reason = completion->reason,
493 .service_userdata = user_service->userdata,
494 };
495
496 header = completion->header;
497 if (header) {
498 void __user *msgbuf;
499 int msglen;
500
501 msglen = header->size + sizeof(struct vchiq_header);
502 /* This must be a VCHIQ-style service */
503 if (args->msgbufsize < msglen) {
504 vchiq_log_error(vchiq_arm_log_level,
505 "header %pK: msgbufsize %x < msglen %x",
506 header, args->msgbufsize, msglen);
507 WARN(1, "invalid message size\n");
508 if (ret == 0)
509 ret = -EMSGSIZE;
510 break;
511 }
512 if (msgbufcount <= 0)
513 /* Stall here for lack of a buffer for the message. */
514 break;
515 /* Get the pointer from user space */
516 msgbufcount--;
517 if (vchiq_get_user_ptr(&msgbuf, args->msgbufs,
518 msgbufcount)) {
519 if (ret == 0)
520 ret = -EFAULT;
521 break;
522 }
523
524 /* Copy the message to user space */
525 if (copy_to_user(msgbuf, header, msglen)) {
526 if (ret == 0)
527 ret = -EFAULT;
528 break;
529 }
530
531 /* Now it has been copied, the message can be released. */
532 vchiq_release_message(instance, service->handle, header);
533
534 /* The completion must point to the msgbuf. */
535 user_completion.header = msgbuf;
536 }
537
538 if ((completion->reason == VCHIQ_SERVICE_CLOSED) &&
539 !instance->use_close_delivered)
540 vchiq_service_put(service);
541
542 /*
543 * FIXME: address space mismatch, does bulk_userdata
544 * actually point to user or kernel memory?
545 */
546 user_completion.bulk_userdata = completion->bulk_userdata;
547
548 if (vchiq_put_completion(args->buf, &user_completion, ret)) {
549 if (ret == 0)
550 ret = -EFAULT;
551 break;
552 }
553
554 /*
555 * Ensure that the above copy has completed
556 * before advancing the remove pointer.
557 */
558 mb();
559 remove++;
560 instance->completion_remove = remove;
561 }
562
563 if (msgbufcount != args->msgbufcount) {
564 if (put_user(msgbufcount, msgbufcountp))
565 ret = -EFAULT;
566 }
567 out:
568 if (ret)
569 complete(&instance->remove_event);
570 mutex_unlock(&instance->completion_mutex);
571 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
572
573 return ret;
574 }
575
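/*
 * Main ioctl dispatcher for /dev/vchiq.  Illustrative user-space
 * sketch only (not part of this file; the ioctl numbers and argument
 * layouts come from vchiq_ioctl.h):
 *
 *	int fd = open("/dev/vchiq", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, VCHIQ_IOC_CONNECT, 0) == 0)
 *		... VCHIQ_IOC_CREATE_SERVICE, QUEUE_MESSAGE, etc. ...
 */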
576 static long
577 vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
578 {
579 struct vchiq_instance *instance = file->private_data;
580 int status = 0;
581 struct vchiq_service *service = NULL;
582 long ret = 0;
583 int i, rc;
584
585 vchiq_log_trace(vchiq_arm_log_level,
586 "%s - instance %pK, cmd %s, arg %lx", __func__, instance,
587 ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) && (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
588 ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
589
590 switch (cmd) {
591 case VCHIQ_IOC_SHUTDOWN:
592 if (!instance->connected)
593 break;
594
595 /* Remove all services */
596 i = 0;
597 while ((service = next_service_by_instance(instance->state,
598 instance, &i))) {
599 status = vchiq_remove_service(instance, service->handle);
600 vchiq_service_put(service);
601 if (status)
602 break;
603 }
604 service = NULL;
605
606 if (!status) {
607 /* Wake the completion thread and ask it to exit */
608 instance->closing = 1;
609 complete(&instance->insert_event);
610 }
611
612 break;
613
614 case VCHIQ_IOC_CONNECT:
615 if (instance->connected) {
616 ret = -EINVAL;
617 break;
618 }
619 rc = mutex_lock_killable(&instance->state->mutex);
620 if (rc) {
621 vchiq_log_error(vchiq_arm_log_level,
622 "vchiq: connect: could not lock mutex for state %d: %d",
623 instance->state->id, rc);
624 ret = -EINTR;
625 break;
626 }
627 status = vchiq_connect_internal(instance->state, instance);
628 mutex_unlock(&instance->state->mutex);
629
630 if (!status)
631 instance->connected = 1;
632 else
633 vchiq_log_error(vchiq_arm_log_level,
634 "vchiq: could not connect: %d", status);
635 break;
636
637 case VCHIQ_IOC_CREATE_SERVICE: {
638 struct vchiq_create_service __user *argp;
639 struct vchiq_create_service args;
640
641 argp = (void __user *)arg;
642 if (copy_from_user(&args, argp, sizeof(args))) {
643 ret = -EFAULT;
644 break;
645 }
646
647 ret = vchiq_ioc_create_service(instance, &args);
648 if (ret < 0)
649 break;
650
651 if (put_user(args.handle, &argp->handle)) {
652 vchiq_remove_service(instance, args.handle);
653 ret = -EFAULT;
654 }
655 } break;
656
657 case VCHIQ_IOC_CLOSE_SERVICE:
658 case VCHIQ_IOC_REMOVE_SERVICE: {
659 unsigned int handle = (unsigned int)arg;
660 struct user_service *user_service;
661
662 service = find_service_for_instance(instance, handle);
663 if (!service) {
664 ret = -EINVAL;
665 break;
666 }
667
668 user_service = service->base.userdata;
669
670 /*
671 * close_pending is false on first entry, and when the
672 * wait in vchiq_close_service has been interrupted.
673 */
674 if (!user_service->close_pending) {
675 status = (cmd == VCHIQ_IOC_CLOSE_SERVICE) ?
676 vchiq_close_service(instance, service->handle) :
677 vchiq_remove_service(instance, service->handle);
678 if (status)
679 break;
680 }
681
682 /*
683 * close_pending is true once the underlying service
684 * has been closed until the client library calls the
685 * CLOSE_DELIVERED ioctl, signalling close_event.
686 */
687 if (user_service->close_pending &&
688 wait_for_completion_interruptible(&user_service->close_event))
689 status = -EAGAIN;
690 break;
691 }
692
693 case VCHIQ_IOC_USE_SERVICE:
694 case VCHIQ_IOC_RELEASE_SERVICE: {
695 unsigned int handle = (unsigned int)arg;
696
697 service = find_service_for_instance(instance, handle);
698 if (service) {
699 ret = (cmd == VCHIQ_IOC_USE_SERVICE) ?
700 vchiq_use_service_internal(service) :
701 vchiq_release_service_internal(service);
702 if (ret) {
703 vchiq_log_error(vchiq_susp_log_level,
704 "%s: cmd %s returned error %ld for service %c%c%c%c:%03d",
705 __func__, (cmd == VCHIQ_IOC_USE_SERVICE) ?
706 "VCHIQ_IOC_USE_SERVICE" :
707 "VCHIQ_IOC_RELEASE_SERVICE",
708 ret,
709 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
710 service->client_id);
711 }
712 } else {
713 ret = -EINVAL;
714 }
715 } break;
716
717 case VCHIQ_IOC_QUEUE_MESSAGE: {
718 struct vchiq_queue_message args;
719
720 if (copy_from_user(&args, (const void __user *)arg,
721 sizeof(args))) {
722 ret = -EFAULT;
723 break;
724 }
725
726 service = find_service_for_instance(instance, args.handle);
727
728 if (service && (args.count <= MAX_ELEMENTS)) {
729 /* Copy elements into kernel space */
730 struct vchiq_element elements[MAX_ELEMENTS];
731
732 if (copy_from_user(elements, args.elements,
733 args.count * sizeof(struct vchiq_element)) == 0)
734 ret = vchiq_ioc_queue_message(instance, args.handle, elements,
735 args.count);
736 else
737 ret = -EFAULT;
738 } else {
739 ret = -EINVAL;
740 }
741 } break;
742
743 case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
744 case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
745 struct vchiq_queue_bulk_transfer args;
746 struct vchiq_queue_bulk_transfer __user *argp;
747
748 enum vchiq_bulk_dir dir =
749 (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
750 VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
751
752 argp = (void __user *)arg;
753 if (copy_from_user(&args, argp, sizeof(args))) {
754 ret = -EFAULT;
755 break;
756 }
757
758 ret = vchiq_irq_queue_bulk_tx_rx(instance, &args,
759 dir, &argp->mode);
760 } break;
761
762 case VCHIQ_IOC_AWAIT_COMPLETION: {
763 struct vchiq_await_completion args;
764 struct vchiq_await_completion __user *argp;
765
766 argp = (void __user *)arg;
767 if (copy_from_user(&args, argp, sizeof(args))) {
768 ret = -EFAULT;
769 break;
770 }
771
772 ret = vchiq_ioc_await_completion(instance, &args,
773 &argp->msgbufcount);
774 } break;
775
776 case VCHIQ_IOC_DEQUEUE_MESSAGE: {
777 struct vchiq_dequeue_message args;
778
779 if (copy_from_user(&args, (const void __user *)arg,
780 sizeof(args))) {
781 ret = -EFAULT;
782 break;
783 }
784
785 ret = vchiq_ioc_dequeue_message(instance, &args);
786 } break;
787
788 case VCHIQ_IOC_GET_CLIENT_ID: {
789 unsigned int handle = (unsigned int)arg;
790
791 ret = vchiq_get_client_id(instance, handle);
792 } break;
793
794 case VCHIQ_IOC_GET_CONFIG: {
795 struct vchiq_get_config args;
796 struct vchiq_config config;
797
798 if (copy_from_user(&args, (const void __user *)arg,
799 sizeof(args))) {
800 ret = -EFAULT;
801 break;
802 }
803 if (args.config_size > sizeof(config)) {
804 ret = -EINVAL;
805 break;
806 }
807
808 vchiq_get_config(&config);
809 if (copy_to_user(args.pconfig, &config, args.config_size)) {
810 ret = -EFAULT;
811 break;
812 }
813 } break;
814
815 case VCHIQ_IOC_SET_SERVICE_OPTION: {
816 struct vchiq_set_service_option args;
817
818 if (copy_from_user(&args, (const void __user *)arg,
819 sizeof(args))) {
820 ret = -EFAULT;
821 break;
822 }
823
824 service = find_service_for_instance(instance, args.handle);
825 if (!service) {
826 ret = -EINVAL;
827 break;
828 }
829
830 ret = vchiq_set_service_option(instance, args.handle, args.option,
831 args.value);
832 } break;
833
834 case VCHIQ_IOC_LIB_VERSION: {
835 unsigned int lib_version = (unsigned int)arg;
836
837 if (lib_version < VCHIQ_VERSION_MIN)
838 ret = -EINVAL;
839 else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
840 instance->use_close_delivered = 1;
841 } break;
842
843 case VCHIQ_IOC_CLOSE_DELIVERED: {
844 unsigned int handle = (unsigned int)arg;
845
846 service = find_closed_service_for_instance(instance, handle);
847 if (service) {
848 struct user_service *user_service =
849 (struct user_service *)service->base.userdata;
850 close_delivered(user_service);
851 } else {
852 ret = -EINVAL;
853 }
854 } break;
855
856 default:
857 ret = -ENOTTY;
858 break;
859 }
860
861 if (service)
862 vchiq_service_put(service);
863
864 if (ret == 0) {
865 if (status == -EINVAL)
866 ret = -EIO;
867 else if (status == -EAGAIN)
868 ret = -EINTR;
869 }
870
871 if (!status && (ret < 0) && (ret != -EINTR) && (ret != -EWOULDBLOCK))
872 vchiq_log_info(vchiq_arm_log_level,
873 " ioctl instance %pK, cmd %s -> status %d, %ld",
874 instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
875 ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
876 else
877 vchiq_log_trace(vchiq_arm_log_level,
878 " ioctl instance %pK, cmd %s -> status %d, %ld",
879 instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
880 ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
881
882 return ret;
883 }
884
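/*
 * 32-bit compat layer: the *32 structures below mirror the native
 * ioctl arguments but carry compat_uptr_t pointers; each handler
 * converts its arguments to the native form and reuses the common
 * back-ends above.
 */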
885 #if defined(CONFIG_COMPAT)
886
887 struct vchiq_service_params32 {
888 int fourcc;
889 compat_uptr_t callback;
890 compat_uptr_t userdata;
891 short version; /* Increment for non-trivial changes */
892 short version_min; /* Update for incompatible changes */
893 };
894
895 struct vchiq_create_service32 {
896 struct vchiq_service_params32 params;
897 int is_open;
898 int is_vchi;
899 unsigned int handle; /* OUT */
900 };
901
902 #define VCHIQ_IOC_CREATE_SERVICE32 \
903 _IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service32)
904
905 static long
906 vchiq_compat_ioctl_create_service(struct file *file, unsigned int cmd,
907 struct vchiq_create_service32 __user *ptrargs32)
908 {
909 struct vchiq_create_service args;
910 struct vchiq_create_service32 args32;
911 struct vchiq_instance *instance = file->private_data;
912 long ret;
913
914 if (copy_from_user(&args32, ptrargs32, sizeof(args32)))
915 return -EFAULT;
916
917 args = (struct vchiq_create_service) {
918 .params = {
919 .fourcc = args32.params.fourcc,
920 .callback = compat_ptr(args32.params.callback),
921 .userdata = compat_ptr(args32.params.userdata),
922 .version = args32.params.version,
923 .version_min = args32.params.version_min,
924 },
925 .is_open = args32.is_open,
926 .is_vchi = args32.is_vchi,
927 .handle = args32.handle,
928 };
929
930 ret = vchiq_ioc_create_service(instance, &args);
931 if (ret < 0)
932 return ret;
933
934 if (put_user(args.handle, &ptrargs32->handle)) {
935 vchiq_remove_service(instance, args.handle);
936 return -EFAULT;
937 }
938
939 return 0;
940 }
941
942 struct vchiq_element32 {
943 compat_uptr_t data;
944 unsigned int size;
945 };
946
947 struct vchiq_queue_message32 {
948 unsigned int handle;
949 unsigned int count;
950 compat_uptr_t elements;
951 };
952
953 #define VCHIQ_IOC_QUEUE_MESSAGE32 \
954 _IOW(VCHIQ_IOC_MAGIC, 4, struct vchiq_queue_message32)
955
956 static long
957 vchiq_compat_ioctl_queue_message(struct file *file,
958 unsigned int cmd,
959 struct vchiq_queue_message32 __user *arg)
960 {
961 struct vchiq_queue_message args;
962 struct vchiq_queue_message32 args32;
963 struct vchiq_service *service;
964 struct vchiq_instance *instance = file->private_data;
965 int ret;
966
967 if (copy_from_user(&args32, arg, sizeof(args32)))
968 return -EFAULT;
969
970 args = (struct vchiq_queue_message) {
971 .handle = args32.handle,
972 .count = args32.count,
973 .elements = compat_ptr(args32.elements),
974 };
975
976 if (args32.count > MAX_ELEMENTS)
977 return -EINVAL;
978
979 service = find_service_for_instance(instance, args.handle);
980 if (!service)
981 return -EINVAL;
982
983 if (args32.elements && args32.count) {
984 struct vchiq_element32 element32[MAX_ELEMENTS];
985 struct vchiq_element elements[MAX_ELEMENTS];
986 unsigned int count;
987
988 if (copy_from_user(&element32, args.elements,
989 sizeof(element32))) {
990 vchiq_service_put(service);
991 return -EFAULT;
992 }
993
994 for (count = 0; count < args32.count; count++) {
995 elements[count].data =
996 compat_ptr(element32[count].data);
997 elements[count].size = element32[count].size;
998 }
999 ret = vchiq_ioc_queue_message(instance, args.handle, elements,
1000 args.count);
1001 } else {
1002 ret = -EINVAL;
1003 }
1004 vchiq_service_put(service);
1005
1006 return ret;
1007 }
1008
1009 struct vchiq_queue_bulk_transfer32 {
1010 unsigned int handle;
1011 compat_uptr_t data;
1012 unsigned int size;
1013 compat_uptr_t userdata;
1014 enum vchiq_bulk_mode mode;
1015 };
1016
1017 #define VCHIQ_IOC_QUEUE_BULK_TRANSMIT32 \
1018 _IOWR(VCHIQ_IOC_MAGIC, 5, struct vchiq_queue_bulk_transfer32)
1019 #define VCHIQ_IOC_QUEUE_BULK_RECEIVE32 \
1020 _IOWR(VCHIQ_IOC_MAGIC, 6, struct vchiq_queue_bulk_transfer32)
1021
1022 static long
1023 vchiq_compat_ioctl_queue_bulk(struct file *file,
1024 unsigned int cmd,
1025 struct vchiq_queue_bulk_transfer32 __user *argp)
1026 {
1027 struct vchiq_queue_bulk_transfer32 args32;
1028 struct vchiq_queue_bulk_transfer args;
1029 enum vchiq_bulk_dir dir = (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT32) ?
1030 VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
1031
1032 if (copy_from_user(&args32, argp, sizeof(args32)))
1033 return -EFAULT;
1034
1035 args = (struct vchiq_queue_bulk_transfer) {
1036 .handle = args32.handle,
1037 .data = compat_ptr(args32.data),
1038 .size = args32.size,
1039 .userdata = compat_ptr(args32.userdata),
1040 .mode = args32.mode,
1041 };
1042
1043 return vchiq_irq_queue_bulk_tx_rx(file->private_data, &args,
1044 dir, &argp->mode);
1045 }
1046
1047 struct vchiq_await_completion32 {
1048 unsigned int count;
1049 compat_uptr_t buf;
1050 unsigned int msgbufsize;
1051 unsigned int msgbufcount; /* IN/OUT */
1052 compat_uptr_t msgbufs;
1053 };
1054
1055 #define VCHIQ_IOC_AWAIT_COMPLETION32 \
1056 _IOWR(VCHIQ_IOC_MAGIC, 7, struct vchiq_await_completion32)
1057
1058 static long
1059 vchiq_compat_ioctl_await_completion(struct file *file,
1060 unsigned int cmd,
1061 struct vchiq_await_completion32 __user *argp)
1062 {
1063 struct vchiq_await_completion args;
1064 struct vchiq_await_completion32 args32;
1065
1066 if (copy_from_user(&args32, argp, sizeof(args32)))
1067 return -EFAULT;
1068
1069 args = (struct vchiq_await_completion) {
1070 .count = args32.count,
1071 .buf = compat_ptr(args32.buf),
1072 .msgbufsize = args32.msgbufsize,
1073 .msgbufcount = args32.msgbufcount,
1074 .msgbufs = compat_ptr(args32.msgbufs),
1075 };
1076
1077 return vchiq_ioc_await_completion(file->private_data, &args,
1078 &argp->msgbufcount);
1079 }
1080
1081 struct vchiq_dequeue_message32 {
1082 unsigned int handle;
1083 int blocking;
1084 unsigned int bufsize;
1085 compat_uptr_t buf;
1086 };
1087
1088 #define VCHIQ_IOC_DEQUEUE_MESSAGE32 \
1089 _IOWR(VCHIQ_IOC_MAGIC, 8, struct vchiq_dequeue_message32)
1090
1091 static long
1092 vchiq_compat_ioctl_dequeue_message(struct file *file,
1093 unsigned int cmd,
1094 struct vchiq_dequeue_message32 __user *arg)
1095 {
1096 struct vchiq_dequeue_message32 args32;
1097 struct vchiq_dequeue_message args;
1098
1099 if (copy_from_user(&args32, arg, sizeof(args32)))
1100 return -EFAULT;
1101
1102 args = (struct vchiq_dequeue_message) {
1103 .handle = args32.handle,
1104 .blocking = args32.blocking,
1105 .bufsize = args32.bufsize,
1106 .buf = compat_ptr(args32.buf),
1107 };
1108
1109 return vchiq_ioc_dequeue_message(file->private_data, &args);
1110 }
1111
1112 struct vchiq_get_config32 {
1113 unsigned int config_size;
1114 compat_uptr_t pconfig;
1115 };
1116
1117 #define VCHIQ_IOC_GET_CONFIG32 \
1118 _IOWR(VCHIQ_IOC_MAGIC, 10, struct vchiq_get_config32)
1119
1120 static long
1121 vchiq_compat_ioctl_get_config(struct file *file,
1122 unsigned int cmd,
1123 struct vchiq_get_config32 __user *arg)
1124 {
1125 struct vchiq_get_config32 args32;
1126 struct vchiq_config config;
1127 void __user *ptr;
1128
1129 if (copy_from_user(&args32, arg, sizeof(args32)))
1130 return -EFAULT;
1131 if (args32.config_size > sizeof(config))
1132 return -EINVAL;
1133
1134 vchiq_get_config(&config);
1135 ptr = compat_ptr(args32.pconfig);
1136 if (copy_to_user(ptr, &config, args32.config_size))
1137 return -EFAULT;
1138
1139 return 0;
1140 }
1141
1142 static long
1143 vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1144 {
1145 void __user *argp = compat_ptr(arg);
1146
1147 switch (cmd) {
1148 case VCHIQ_IOC_CREATE_SERVICE32:
1149 return vchiq_compat_ioctl_create_service(file, cmd, argp);
1150 case VCHIQ_IOC_QUEUE_MESSAGE32:
1151 return vchiq_compat_ioctl_queue_message(file, cmd, argp);
1152 case VCHIQ_IOC_QUEUE_BULK_TRANSMIT32:
1153 case VCHIQ_IOC_QUEUE_BULK_RECEIVE32:
1154 return vchiq_compat_ioctl_queue_bulk(file, cmd, argp);
1155 case VCHIQ_IOC_AWAIT_COMPLETION32:
1156 return vchiq_compat_ioctl_await_completion(file, cmd, argp);
1157 case VCHIQ_IOC_DEQUEUE_MESSAGE32:
1158 return vchiq_compat_ioctl_dequeue_message(file, cmd, argp);
1159 case VCHIQ_IOC_GET_CONFIG32:
1160 return vchiq_compat_ioctl_get_config(file, cmd, argp);
1161 default:
1162 return vchiq_ioctl(file, cmd, (unsigned long)argp);
1163 }
1164 }
1165
1166 #endif
1167
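/*
 * Each open of /dev/vchiq creates a fresh vchiq_instance carrying the
 * per-client completion queue, bulk-waiter list and debugfs entry.
 */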
1168 static int vchiq_open(struct inode *inode, struct file *file)
1169 {
1170 struct vchiq_state *state = vchiq_get_state();
1171 struct vchiq_instance *instance;
1172
1173 vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
1174
1175 if (!state) {
1176 vchiq_log_error(vchiq_arm_log_level,
1177 "vchiq has no connection to VideoCore");
1178 return -ENOTCONN;
1179 }
1180
1181 instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1182 if (!instance)
1183 return -ENOMEM;
1184
1185 instance->state = state;
1186 instance->pid = current->tgid;
1187
1188 vchiq_debugfs_add_instance(instance);
1189
1190 init_completion(&instance->insert_event);
1191 init_completion(&instance->remove_event);
1192 mutex_init(&instance->completion_mutex);
1193 mutex_init(&instance->bulk_waiter_list_mutex);
1194 INIT_LIST_HEAD(&instance->bulk_waiter_list);
1195
1196 file->private_data = instance;
1197
1198 return 0;
1199 }
1200
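/*
 * Final close of an instance: wake and stop the completion thread,
 * terminate every service still owned by the instance, drain their
 * message queues and any pending completion records, then drop the
 * use count taken at the start of the function and free the instance.
 */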
1201 static int vchiq_release(struct inode *inode, struct file *file)
1202 {
1203 struct vchiq_instance *instance = file->private_data;
1204 struct vchiq_state *state = vchiq_get_state();
1205 struct vchiq_service *service;
1206 int ret = 0;
1207 int i;
1208
1209 vchiq_log_info(vchiq_arm_log_level, "%s: instance=%lx", __func__,
1210 (unsigned long)instance);
1211
1212 if (!state) {
1213 ret = -EPERM;
1214 goto out;
1215 }
1216
1217 /* Ensure videocore is awake to allow termination. */
1218 vchiq_use_internal(instance->state, NULL, USE_TYPE_VCHIQ);
1219
1220 mutex_lock(&instance->completion_mutex);
1221
1222 /* Wake the completion thread and ask it to exit */
1223 instance->closing = 1;
1224 complete(&instance->insert_event);
1225
1226 mutex_unlock(&instance->completion_mutex);
1227
1228 /* Wake the slot handler if the completion queue is full. */
1229 complete(&instance->remove_event);
1230
1231 /* Mark all services for termination... */
1232 i = 0;
1233 while ((service = next_service_by_instance(state, instance, &i))) {
1234 struct user_service *user_service = service->base.userdata;
1235
1236 /* Wake the slot handler if the msg queue is full. */
1237 complete(&user_service->remove_event);
1238
1239 vchiq_terminate_service_internal(service);
1240 vchiq_service_put(service);
1241 }
1242
1243 /* ...and wait for them to die */
1244 i = 0;
1245 while ((service = next_service_by_instance(state, instance, &i))) {
1246 struct user_service *user_service = service->base.userdata;
1247
1248 wait_for_completion(&service->remove_event);
1249
1250 if (WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE)) {
1251 vchiq_service_put(service);
1252 break;
1253 }
1254
1255 spin_lock(&msg_queue_spinlock);
1256
1257 while (user_service->msg_remove != user_service->msg_insert) {
1258 struct vchiq_header *header;
1259 int m = user_service->msg_remove & (MSG_QUEUE_SIZE - 1);
1260
1261 header = user_service->msg_queue[m];
1262 user_service->msg_remove++;
1263 spin_unlock(&msg_queue_spinlock);
1264
1265 if (header)
1266 vchiq_release_message(instance, service->handle, header);
1267 spin_lock(&msg_queue_spinlock);
1268 }
1269
1270 spin_unlock(&msg_queue_spinlock);
1271
1272 vchiq_service_put(service);
1273 }
1274
1275 /* Release any closed services */
1276 while (instance->completion_remove != instance->completion_insert) {
1277 struct vchiq_completion_data_kernel *completion;
1278 struct vchiq_service *service;
1279
1280 completion = &instance->completions[instance->completion_remove
1281 & (MAX_COMPLETIONS - 1)];
1282 service = completion->service_userdata;
1283 if (completion->reason == VCHIQ_SERVICE_CLOSED) {
1284 struct user_service *user_service =
1285 service->base.userdata;
1286
1287 /* Wake any blocked user-thread */
1288 if (instance->use_close_delivered)
1289 complete(&user_service->close_event);
1290 vchiq_service_put(service);
1291 }
1292 instance->completion_remove++;
1293 }
1294
1295 /* Release the PEER service count. */
1296 vchiq_release_internal(instance->state, NULL);
1297
1298 free_bulk_waiter(instance);
1299
1300 vchiq_debugfs_remove_instance(instance);
1301
1302 kfree(instance);
1303 file->private_data = NULL;
1304
1305 out:
1306 return ret;
1307 }
1308
1309 static ssize_t
1310 vchiq_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1311 {
1312 struct dump_context context;
1313 int err;
1314
1315 context.buf = buf;
1316 context.actual = 0;
1317 context.space = count;
1318 context.offset = *ppos;
1319
1320 err = vchiq_dump_state(&context, &g_state);
1321 if (err)
1322 return err;
1323
1324 *ppos += context.actual;
1325
1326 return context.actual;
1327 }
1328
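/*
 * read() on the device node produces a textual dump of the global
 * vchiq state (see vchiq_read() above), which is useful for debugging,
 * e.g. "cat /dev/vchiq".
 */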
1329 static const struct file_operations
1330 vchiq_fops = {
1331 .owner = THIS_MODULE,
1332 .unlocked_ioctl = vchiq_ioctl,
1333 #if defined(CONFIG_COMPAT)
1334 .compat_ioctl = vchiq_compat_ioctl,
1335 #endif
1336 .open = vchiq_open,
1337 .release = vchiq_release,
1338 .read = vchiq_read
1339 };
1340
1341 static struct miscdevice vchiq_miscdev = {
1342 .fops = &vchiq_fops,
1343 .minor = MISC_DYNAMIC_MINOR,
1344 .name = "vchiq",
1345
1346 };
1347
1348 /**
1349 * vchiq_register_chrdev - Register the char driver for vchiq
1350 * and create the necessary class and
1351 * device files in userspace.
1352 * @parent: The parent of the char device.
1353 *
1354 * Returns 0 on success else returns the error code.
1355 */
1356 int vchiq_register_chrdev(struct device *parent)
1357 {
1358 vchiq_miscdev.parent = parent;
1359
1360 return misc_register(&vchiq_miscdev);
1361 }
1362
1363 /**
1364 * vchiq_deregister_chrdev - Deregister and cleanup the vchiq char
1365 * driver and device files
1366 */
1367 void vchiq_deregister_chrdev(void)
1368 {
1369 misc_deregister(&vchiq_miscdev);
1370 }
1371