1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
3 
4 #include <linux/types.h>
5 #include <linux/completion.h>
6 #include <linux/mutex.h>
7 #include <linux/bitops.h>
8 #include <linux/kthread.h>
9 #include <linux/wait.h>
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/kref.h>
13 #include <linux/rcupdate.h>
14 #include <linux/sched/signal.h>
15 
16 #include "vchiq_arm.h"
17 #include "vchiq_core.h"
18 
19 #define VCHIQ_SLOT_HANDLER_STACK 8192
20 
21 #define VCHIQ_MSG_PADDING            0  /* -                                 */
22 #define VCHIQ_MSG_CONNECT            1  /* -                                 */
23 #define VCHIQ_MSG_OPEN               2  /* + (srcport, -), fourcc, client_id */
24 #define VCHIQ_MSG_OPENACK            3  /* + (srcport, dstport)              */
25 #define VCHIQ_MSG_CLOSE              4  /* + (srcport, dstport)              */
26 #define VCHIQ_MSG_DATA               5  /* + (srcport, dstport)              */
27 #define VCHIQ_MSG_BULK_RX            6  /* + (srcport, dstport), data, size  */
28 #define VCHIQ_MSG_BULK_TX            7  /* + (srcport, dstport), data, size  */
29 #define VCHIQ_MSG_BULK_RX_DONE       8  /* + (srcport, dstport), actual      */
30 #define VCHIQ_MSG_BULK_TX_DONE       9  /* + (srcport, dstport), actual      */
31 #define VCHIQ_MSG_PAUSE             10  /* -                                 */
32 #define VCHIQ_MSG_RESUME            11  /* -                                 */
33 #define VCHIQ_MSG_REMOTE_USE        12  /* -                                 */
34 #define VCHIQ_MSG_REMOTE_RELEASE    13  /* -                                 */
35 #define VCHIQ_MSG_REMOTE_USE_ACTIVE 14  /* -                                 */
36 
37 #define TYPE_SHIFT 24
38 
39 #define VCHIQ_PORT_MAX                 (VCHIQ_MAX_SERVICES - 1)
40 #define VCHIQ_PORT_FREE                0x1000
41 #define VCHIQ_PORT_IS_VALID(port)      ((port) < VCHIQ_PORT_FREE)
42 #define VCHIQ_MAKE_MSG(type, srcport, dstport) \
43 	(((type) << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
44 #define VCHIQ_MSG_TYPE(msgid)          ((unsigned int)(msgid) >> TYPE_SHIFT)
45 #define VCHIQ_MSG_SRCPORT(msgid) \
46 	(unsigned short)(((unsigned int)(msgid) >> 12) & 0xfff)
47 #define VCHIQ_MSG_DSTPORT(msgid) \
48 	((unsigned short)(msgid) & 0xfff)
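
/*
 * Illustrative note (added): a message ID packs the type and ports into one
 * word - bits 31..24 hold the message type, bits 23..12 the source port and
 * bits 11..0 the destination port. For example,
 * VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA, 3, 7) == 0x05003007, from which
 * VCHIQ_MSG_TYPE() recovers 5, VCHIQ_MSG_SRCPORT() 3 and VCHIQ_MSG_DSTPORT() 7.
 */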
49 
50 #define MAKE_CONNECT			(VCHIQ_MSG_CONNECT << TYPE_SHIFT)
51 #define MAKE_OPEN(srcport) \
52 	((VCHIQ_MSG_OPEN << TYPE_SHIFT) | ((srcport) << 12))
53 #define MAKE_OPENACK(srcport, dstport) \
54 	((VCHIQ_MSG_OPENACK << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
55 #define MAKE_CLOSE(srcport, dstport) \
56 	((VCHIQ_MSG_CLOSE << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
57 #define MAKE_DATA(srcport, dstport) \
58 	((VCHIQ_MSG_DATA << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
59 #define MAKE_PAUSE			(VCHIQ_MSG_PAUSE << TYPE_SHIFT)
60 #define MAKE_RESUME			(VCHIQ_MSG_RESUME << TYPE_SHIFT)
61 #define MAKE_REMOTE_USE			(VCHIQ_MSG_REMOTE_USE << TYPE_SHIFT)
62 #define MAKE_REMOTE_USE_ACTIVE		(VCHIQ_MSG_REMOTE_USE_ACTIVE << TYPE_SHIFT)
63 
64 /* Ensure the fields are wide enough */
65 static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
66 	== 0);
67 static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
68 static_assert((unsigned int)VCHIQ_PORT_MAX <
69 	(unsigned int)VCHIQ_PORT_FREE);
70 
71 #define VCHIQ_MSGID_PADDING            VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
72 #define VCHIQ_MSGID_CLAIMED            0x40000000
73 
74 #define VCHIQ_FOURCC_INVALID           0x00000000
75 #define VCHIQ_FOURCC_IS_LEGAL(fourcc)  ((fourcc) != VCHIQ_FOURCC_INVALID)
76 
77 #define VCHIQ_BULK_ACTUAL_ABORTED -1
78 
79 #if VCHIQ_ENABLE_STATS
80 #define VCHIQ_STATS_INC(state, stat) (state->stats.stat++)
81 #define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats.stat++)
82 #define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
83 	(service->stats.stat += addend)
84 #else
85 #define VCHIQ_STATS_INC(state, stat) ((void)0)
86 #define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
87 #define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
88 #endif
89 
90 #define HANDLE_STATE_SHIFT 12
91 
92 #define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
93 #define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
94 #define SLOT_INDEX_FROM_DATA(state, data) \
95 	(((unsigned int)((char *)data - (char *)state->slot_data)) / \
96 	VCHIQ_SLOT_SIZE)
97 #define SLOT_INDEX_FROM_INFO(state, info) \
98 	((unsigned int)(info - state->slot_info))
99 #define SLOT_QUEUE_INDEX_FROM_POS(pos) \
100 	((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
101 #define SLOT_QUEUE_INDEX_FROM_POS_MASKED(pos) \
102 	(SLOT_QUEUE_INDEX_FROM_POS(pos) & VCHIQ_SLOT_QUEUE_MASK)
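
/*
 * Illustrative note (added): SLOT_QUEUE_INDEX_FROM_POS() converts a byte
 * position in the transmit stream into a slot index by dividing by
 * VCHIQ_SLOT_SIZE (a power of two, checked in check_sizes()); assuming the
 * usual 4096-byte slot, pos 0x2010 maps to slot queue index 2. The _MASKED
 * variant additionally wraps the index into the circular slot queue using
 * VCHIQ_SLOT_QUEUE_MASK.
 */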
103 
104 #define BULK_INDEX(x) ((x) & (VCHIQ_NUM_SERVICE_BULKS - 1))
105 
106 #define SRVTRACE_LEVEL(srv) \
107 	(((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level)
108 #define SRVTRACE_ENABLED(srv, lev) \
109 	(((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev)))
110 
111 #define NO_CLOSE_RECVD	0
112 #define CLOSE_RECVD	1
113 
114 #define NO_RETRY_POLL	0
115 #define RETRY_POLL	1
116 
117 struct vchiq_open_payload {
118 	int fourcc;
119 	int client_id;
120 	short version;
121 	short version_min;
122 };
123 
124 struct vchiq_openack_payload {
125 	short version;
126 };
127 
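/*
 * Descriptive note (added): flags accepted by queue_message().
 * QMFLAGS_IS_BLOCKING lets reserve_space() sleep until a slot becomes free,
 * while the NO_MUTEX_LOCK/NO_MUTEX_UNLOCK variants allow callers that already
 * hold, or need to keep holding, slot_mutex to skip the corresponding lock
 * operation inside queue_message().
 */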
128 enum {
129 	QMFLAGS_IS_BLOCKING     = BIT(0),
130 	QMFLAGS_NO_MUTEX_LOCK   = BIT(1),
131 	QMFLAGS_NO_MUTEX_UNLOCK = BIT(2)
132 };
133 
134 enum {
135 	VCHIQ_POLL_TERMINATE,
136 	VCHIQ_POLL_REMOVE,
137 	VCHIQ_POLL_TXNOTIFY,
138 	VCHIQ_POLL_RXNOTIFY,
139 	VCHIQ_POLL_COUNT
140 };
141 
142 /* we require this for consistency between endpoints */
143 static_assert(sizeof(struct vchiq_header) == 8);
144 static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
145 
146 static inline void check_sizes(void)
147 {
148 	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_SLOT_SIZE);
149 	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS);
150 	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS_PER_SIDE);
151 	BUILD_BUG_ON_NOT_POWER_OF_2(sizeof(struct vchiq_header));
152 	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_CURRENT_BULKS);
153 	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_SERVICE_BULKS);
154 	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SERVICES);
155 }
156 
157 /* Run time control of log level, based on KERN_XXX level. */
158 int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
159 int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
160 int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
161 
162 DEFINE_SPINLOCK(bulk_waiter_spinlock);
163 static DEFINE_SPINLOCK(quota_spinlock);
164 
165 static unsigned int handle_seq;
166 
167 static const char *const srvstate_names[] = {
168 	"FREE",
169 	"HIDDEN",
170 	"LISTENING",
171 	"OPENING",
172 	"OPEN",
173 	"OPENSYNC",
174 	"CLOSESENT",
175 	"CLOSERECVD",
176 	"CLOSEWAIT",
177 	"CLOSED"
178 };
179 
180 static const char *const reason_names[] = {
181 	"SERVICE_OPENED",
182 	"SERVICE_CLOSED",
183 	"MESSAGE_AVAILABLE",
184 	"BULK_TRANSMIT_DONE",
185 	"BULK_RECEIVE_DONE",
186 	"BULK_TRANSMIT_ABORTED",
187 	"BULK_RECEIVE_ABORTED"
188 };
189 
190 static const char *const conn_state_names[] = {
191 	"DISCONNECTED",
192 	"CONNECTING",
193 	"CONNECTED",
194 	"PAUSING",
195 	"PAUSE_SENT",
196 	"PAUSED",
197 	"RESUMING",
198 	"PAUSE_TIMEOUT",
199 	"RESUME_TIMEOUT"
200 };
201 
202 static void
203 release_message_sync(struct vchiq_state *state, struct vchiq_header *header);
204 
205 static const char *msg_type_str(unsigned int msg_type)
206 {
207 	switch (msg_type) {
208 	case VCHIQ_MSG_PADDING:			return "PADDING";
209 	case VCHIQ_MSG_CONNECT:			return "CONNECT";
210 	case VCHIQ_MSG_OPEN:			return "OPEN";
211 	case VCHIQ_MSG_OPENACK:			return "OPENACK";
212 	case VCHIQ_MSG_CLOSE:			return "CLOSE";
213 	case VCHIQ_MSG_DATA:			return "DATA";
214 	case VCHIQ_MSG_BULK_RX:			return "BULK_RX";
215 	case VCHIQ_MSG_BULK_TX:			return "BULK_TX";
216 	case VCHIQ_MSG_BULK_RX_DONE:		return "BULK_RX_DONE";
217 	case VCHIQ_MSG_BULK_TX_DONE:		return "BULK_TX_DONE";
218 	case VCHIQ_MSG_PAUSE:			return "PAUSE";
219 	case VCHIQ_MSG_RESUME:			return "RESUME";
220 	case VCHIQ_MSG_REMOTE_USE:		return "REMOTE_USE";
221 	case VCHIQ_MSG_REMOTE_RELEASE:		return "REMOTE_RELEASE";
222 	case VCHIQ_MSG_REMOTE_USE_ACTIVE:	return "REMOTE_USE_ACTIVE";
223 	}
224 	return "???";
225 }
226 
227 static inline void
228 set_service_state(struct vchiq_service *service, int newstate)
229 {
230 	vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
231 		       service->state->id, service->localport,
232 		       srvstate_names[service->srvstate],
233 		       srvstate_names[newstate]);
234 	service->srvstate = newstate;
235 }
236 
237 struct vchiq_service *handle_to_service(struct vchiq_instance *instance, unsigned int handle)
238 {
239 	int idx = handle & (VCHIQ_MAX_SERVICES - 1);
240 
241 	return rcu_dereference(instance->state->services[idx]);
242 }
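
/*
 * Descriptive note (added): a service handle encodes the service's index in
 * its low bits, which is all handle_to_service() needs; the remaining bits
 * carry a sequence value (see handle_seq and HANDLE_STATE_SHIFT above),
 * assigned when the service is created outside this excerpt, so a stale or
 * recycled handle fails the "service->handle == handle" checks in the lookup
 * helpers below.
 */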
243 struct vchiq_service *
244 find_service_by_handle(struct vchiq_instance *instance, unsigned int handle)
245 {
246 	struct vchiq_service *service;
247 
248 	rcu_read_lock();
249 	service = handle_to_service(instance, handle);
250 	if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
251 	    service->handle == handle &&
252 	    kref_get_unless_zero(&service->ref_count)) {
253 		service = rcu_pointer_handoff(service);
254 		rcu_read_unlock();
255 		return service;
256 	}
257 	rcu_read_unlock();
258 	vchiq_log_info(vchiq_core_log_level,
259 		       "Invalid service handle 0x%x", handle);
260 	return NULL;
261 }
262 
263 struct vchiq_service *
264 find_service_by_port(struct vchiq_state *state, unsigned int localport)
265 {
266 	if (localport <= VCHIQ_PORT_MAX) {
267 		struct vchiq_service *service;
268 
269 		rcu_read_lock();
270 		service = rcu_dereference(state->services[localport]);
271 		if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
272 		    kref_get_unless_zero(&service->ref_count)) {
273 			service = rcu_pointer_handoff(service);
274 			rcu_read_unlock();
275 			return service;
276 		}
277 		rcu_read_unlock();
278 	}
279 	vchiq_log_info(vchiq_core_log_level,
280 		       "Invalid port %u", localport);
281 	return NULL;
282 }
283 
284 struct vchiq_service *
285 find_service_for_instance(struct vchiq_instance *instance, unsigned int handle)
286 {
287 	struct vchiq_service *service;
288 
289 	rcu_read_lock();
290 	service = handle_to_service(instance, handle);
291 	if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
292 	    service->handle == handle &&
293 	    service->instance == instance &&
294 	    kref_get_unless_zero(&service->ref_count)) {
295 		service = rcu_pointer_handoff(service);
296 		rcu_read_unlock();
297 		return service;
298 	}
299 	rcu_read_unlock();
300 	vchiq_log_info(vchiq_core_log_level,
301 		       "Invalid service handle 0x%x", handle);
302 	return NULL;
303 }
304 
305 struct vchiq_service *
306 find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int handle)
307 {
308 	struct vchiq_service *service;
309 
310 	rcu_read_lock();
311 	service = handle_to_service(instance, handle);
312 	if (service &&
313 	    (service->srvstate == VCHIQ_SRVSTATE_FREE ||
314 	     service->srvstate == VCHIQ_SRVSTATE_CLOSED) &&
315 	    service->handle == handle &&
316 	    service->instance == instance &&
317 	    kref_get_unless_zero(&service->ref_count)) {
318 		service = rcu_pointer_handoff(service);
319 		rcu_read_unlock();
320 		return service;
321 	}
322 	rcu_read_unlock();
323 	vchiq_log_info(vchiq_core_log_level,
324 		       "Invalid service handle 0x%x", handle);
325 	return service;
326 }
327 
328 struct vchiq_service *
329 __next_service_by_instance(struct vchiq_state *state,
330 			   struct vchiq_instance *instance,
331 			   int *pidx)
332 {
333 	struct vchiq_service *service = NULL;
334 	int idx = *pidx;
335 
336 	while (idx < state->unused_service) {
337 		struct vchiq_service *srv;
338 
339 		srv = rcu_dereference(state->services[idx]);
340 		idx++;
341 		if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE &&
342 		    srv->instance == instance) {
343 			service = srv;
344 			break;
345 		}
346 	}
347 
348 	*pidx = idx;
349 	return service;
350 }
351 
352 struct vchiq_service *
353 next_service_by_instance(struct vchiq_state *state,
354 			 struct vchiq_instance *instance,
355 			 int *pidx)
356 {
357 	struct vchiq_service *service;
358 
359 	rcu_read_lock();
360 	while (1) {
361 		service = __next_service_by_instance(state, instance, pidx);
362 		if (!service)
363 			break;
364 		if (kref_get_unless_zero(&service->ref_count)) {
365 			service = rcu_pointer_handoff(service);
366 			break;
367 		}
368 	}
369 	rcu_read_unlock();
370 	return service;
371 }
372 
373 void
374 vchiq_service_get(struct vchiq_service *service)
375 {
376 	if (!service) {
377 		WARN(1, "%s: service is NULL\n", __func__);
378 		return;
379 	}
380 	kref_get(&service->ref_count);
381 }
382 
383 static void service_release(struct kref *kref)
384 {
385 	struct vchiq_service *service =
386 		container_of(kref, struct vchiq_service, ref_count);
387 	struct vchiq_state *state = service->state;
388 
389 	WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
390 	rcu_assign_pointer(state->services[service->localport], NULL);
391 	if (service->userdata_term)
392 		service->userdata_term(service->base.userdata);
393 	kfree_rcu(service, rcu);
394 }
395 
396 void
397 vchiq_service_put(struct vchiq_service *service)
398 {
399 	if (!service) {
400 		WARN(1, "%s: service is NULL\n", __func__);
401 		return;
402 	}
403 	kref_put(&service->ref_count, service_release);
404 }
405 
406 int
407 vchiq_get_client_id(struct vchiq_instance *instance, unsigned int handle)
408 {
409 	struct vchiq_service *service;
410 	int id;
411 
412 	rcu_read_lock();
413 	service = handle_to_service(instance, handle);
414 	id = service ? service->client_id : 0;
415 	rcu_read_unlock();
416 	return id;
417 }
418 
419 void *
420 vchiq_get_service_userdata(struct vchiq_instance *instance, unsigned int handle)
421 {
422 	void *userdata;
423 	struct vchiq_service *service;
424 
425 	rcu_read_lock();
426 	service = handle_to_service(instance, handle);
427 	userdata = service ? service->base.userdata : NULL;
428 	rcu_read_unlock();
429 	return userdata;
430 }
431 EXPORT_SYMBOL(vchiq_get_service_userdata);
432 
433 static void
434 mark_service_closing_internal(struct vchiq_service *service, int sh_thread)
435 {
436 	struct vchiq_state *state = service->state;
437 	struct vchiq_service_quota *quota;
438 
439 	service->closing = 1;
440 
441 	/* Synchronise with other threads. */
442 	mutex_lock(&state->recycle_mutex);
443 	mutex_unlock(&state->recycle_mutex);
444 	if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
445 		/*
446 		 * If we're pausing then the slot_mutex is held until resume
447 		 * by the slot handler.  Therefore don't try to acquire this
448 		 * mutex if we're the slot handler and in the pause sent state.
449 		 * We don't need to in this case anyway.
450 		 */
451 		mutex_lock(&state->slot_mutex);
452 		mutex_unlock(&state->slot_mutex);
453 	}
454 
455 	/* Unblock any sending thread. */
456 	quota = &state->service_quotas[service->localport];
457 	complete(&quota->quota_event);
458 }
459 
460 static void
461 mark_service_closing(struct vchiq_service *service)
462 {
463 	mark_service_closing_internal(service, 0);
464 }
465 
466 static inline int
467 make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
468 		      struct vchiq_header *header, void *bulk_userdata)
469 {
470 	int status;
471 
472 	vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
473 			service->state->id, service->localport, reason_names[reason],
474 			header, bulk_userdata);
475 	status = service->base.callback(service->instance, reason, header, service->handle,
476 					bulk_userdata);
477 	if (status && (status != -EAGAIN)) {
478 		vchiq_log_warning(vchiq_core_log_level,
479 				  "%d: ignoring ERROR from callback to service %x",
480 				  service->state->id, service->handle);
481 		status = 0;
482 	}
483 
484 	if (reason != VCHIQ_MESSAGE_AVAILABLE)
485 		vchiq_release_message(service->instance, service->handle, header);
486 
487 	return status;
488 }
489 
490 inline void
491 vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
492 {
493 	enum vchiq_connstate oldstate = state->conn_state;
494 
495 	vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id, conn_state_names[oldstate],
496 		       conn_state_names[newstate]);
497 	state->conn_state = newstate;
498 	vchiq_platform_conn_state_changed(state, oldstate, newstate);
499 }
500 
501 /* This initialises a single remote_event, and the associated wait_queue. */
502 static inline void
503 remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
504 {
505 	event->armed = 0;
506 	/*
507 	 * Don't clear the 'fired' flag because it may already have been set
508 	 * by the other side.
509 	 */
510 	init_waitqueue_head(wq);
511 }
512 
513 /*
514  * All the event waiting routines in VCHIQ used a custom semaphore
515  * implementation that filtered most signals. This achieved a behaviour similar
516  * to the "killable" family of functions. While cleaning up this code, all the
517  * routines were switched to the "interruptible" family of functions, as the
518  * former was deemed unjustified and the use of "killable" put all VCHIQ's
519  * threads in the D state.
520  */
521 static inline int
522 remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
523 {
524 	if (!event->fired) {
525 		event->armed = 1;
526 		dsb(sy);
527 		if (wait_event_interruptible(*wq, event->fired)) {
528 			event->armed = 0;
529 			return 0;
530 		}
531 		event->armed = 0;
532 		/* Ensure that the peer sees that we are not waiting (armed == 0). */
533 		wmb();
534 	}
535 
536 	event->fired = 0;
537 	return 1;
538 }
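
/*
 * Sketch of the handshake (added comment, based on the code above and on
 * remote_event_signal(), which is implemented in the platform code): the
 * waiter sets 'armed' before sleeping on 'fired'; the remote side sets
 * 'fired' and rings the doorbell, and remote_event_signal_local() then clears
 * 'armed' and wakes the queue. If the wait is interrupted by a signal, the
 * waiter disarms and returns 0 without clearing 'fired', so an event that has
 * already arrived is not consumed.
 */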
539 
540 /*
541  * Acknowledge that the event has been signalled, and wake any waiters. Usually
542  * called as a result of the doorbell being rung.
543  */
544 static inline void
545 remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
546 {
547 	event->fired = 1;
548 	event->armed = 0;
549 	wake_up_all(wq);
550 }
551 
552 /* Check if a single event has been signalled, waking the waiters if it has. */
553 static inline void
554 remote_event_poll(wait_queue_head_t *wq, struct remote_event *event)
555 {
556 	if (event->fired && event->armed)
557 		remote_event_signal_local(wq, event);
558 }
559 
560 /*
561  * VCHIQ used a small, fixed number of remote events. It is simplest to
562  * enumerate them here for polling.
563  */
564 void
565 remote_event_pollall(struct vchiq_state *state)
566 {
567 	remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger);
568 	remote_event_poll(&state->sync_release_event, &state->local->sync_release);
569 	remote_event_poll(&state->trigger_event, &state->local->trigger);
570 	remote_event_poll(&state->recycle_event, &state->local->recycle);
571 }
572 
573 /*
574  * Round up message sizes so that any space at the end of a slot is always big
575  * enough for a header. This relies on header size being a power of two, which
576  * has been verified earlier by a static assertion.
577  */
578 
579 static inline size_t
580 calc_stride(size_t size)
581 {
582 	/* Allow room for the header */
583 	size += sizeof(struct vchiq_header);
584 
585 	/* Round up */
586 	return (size + sizeof(struct vchiq_header) - 1) &
587 		~(sizeof(struct vchiq_header) - 1);
588 }
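
/*
 * Worked example (added): with the 8-byte header asserted above,
 * calc_stride(0) == 8, calc_stride(1) == 16 and calc_stride(9) == 24, so a
 * message always occupies a multiple of the header size and any tail space
 * left in a slot can hold at least a padding header.
 */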
589 
590 /* Called by the slot handler thread */
591 static struct vchiq_service *
592 get_listening_service(struct vchiq_state *state, int fourcc)
593 {
594 	int i;
595 
596 	WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
597 
598 	rcu_read_lock();
599 	for (i = 0; i < state->unused_service; i++) {
600 		struct vchiq_service *service;
601 
602 		service = rcu_dereference(state->services[i]);
603 		if (service &&
604 		    service->public_fourcc == fourcc &&
605 		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING ||
606 		     (service->srvstate == VCHIQ_SRVSTATE_OPEN &&
607 		      service->remoteport == VCHIQ_PORT_FREE)) &&
608 		    kref_get_unless_zero(&service->ref_count)) {
609 			service = rcu_pointer_handoff(service);
610 			rcu_read_unlock();
611 			return service;
612 		}
613 	}
614 	rcu_read_unlock();
615 	return NULL;
616 }
617 
618 /* Called by the slot handler thread */
619 static struct vchiq_service *
620 get_connected_service(struct vchiq_state *state, unsigned int port)
621 {
622 	int i;
623 
624 	rcu_read_lock();
625 	for (i = 0; i < state->unused_service; i++) {
626 		struct vchiq_service *service =
627 			rcu_dereference(state->services[i]);
628 
629 		if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN &&
630 		    service->remoteport == port &&
631 		    kref_get_unless_zero(&service->ref_count)) {
632 			service = rcu_pointer_handoff(service);
633 			rcu_read_unlock();
634 			return service;
635 		}
636 	}
637 	rcu_read_unlock();
638 	return NULL;
639 }
640 
641 inline void
642 request_poll(struct vchiq_state *state, struct vchiq_service *service,
643 	     int poll_type)
644 {
645 	u32 value;
646 	int index;
647 
648 	if (!service)
649 		goto skip_service;
650 
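	/*
	 * Descriptive note (added): the two cmpxchg loops below perform
	 * lock-free atomic ORs - first setting the requested poll type in the
	 * service's poll_flags, then marking the service's bit in the
	 * per-group poll_services bitmap - retrying whenever another thread
	 * has updated the word concurrently.
	 */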
651 	do {
652 		value = atomic_read(&service->poll_flags);
653 	} while (atomic_cmpxchg(&service->poll_flags, value,
654 		 value | BIT(poll_type)) != value);
655 
656 	index = BITSET_WORD(service->localport);
657 	do {
658 		value = atomic_read(&state->poll_services[index]);
659 	} while (atomic_cmpxchg(&state->poll_services[index],
660 		 value, value | BIT(service->localport & 0x1f)) != value);
661 
662 skip_service:
663 	state->poll_needed = 1;
664 	/* Ensure the slot handler thread sees the poll_needed flag. */
665 	wmb();
666 
667 	/* ... and ensure the slot handler runs. */
668 	remote_event_signal_local(&state->trigger_event, &state->local->trigger);
669 }
670 
671 /*
672  * Called from queue_message, by the slot handler and application threads,
673  * with slot_mutex held
674  */
675 static struct vchiq_header *
676 reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
677 {
678 	struct vchiq_shared_state *local = state->local;
679 	int tx_pos = state->local_tx_pos;
680 	int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
681 
682 	if (space > slot_space) {
683 		struct vchiq_header *header;
684 		/* Fill the remaining space with padding */
685 		WARN_ON(!state->tx_data);
686 		header = (struct vchiq_header *)
687 			(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
688 		header->msgid = VCHIQ_MSGID_PADDING;
689 		header->size = slot_space - sizeof(struct vchiq_header);
690 
691 		tx_pos += slot_space;
692 	}
693 
694 	/* If necessary, get the next slot. */
695 	if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
696 		int slot_index;
697 
698 		/* If there is no free slot... */
699 
700 		if (!try_wait_for_completion(&state->slot_available_event)) {
701 			/* ...wait for one. */
702 
703 			VCHIQ_STATS_INC(state, slot_stalls);
704 
705 			/* But first, flush through the last slot. */
706 			state->local_tx_pos = tx_pos;
707 			local->tx_pos = tx_pos;
708 			remote_event_signal(&state->remote->trigger);
709 
710 			if (!is_blocking ||
711 			    (wait_for_completion_interruptible(&state->slot_available_event)))
712 				return NULL; /* No space available */
713 		}
714 
715 		if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
716 			complete(&state->slot_available_event);
717 			pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos);
718 			return NULL;
719 		}
720 
721 		slot_index = local->slot_queue[SLOT_QUEUE_INDEX_FROM_POS_MASKED(tx_pos)];
722 		state->tx_data =
723 			(char *)SLOT_DATA_FROM_INDEX(state, slot_index);
724 	}
725 
726 	state->local_tx_pos = tx_pos + space;
727 
728 	return (struct vchiq_header *)(state->tx_data +
729 						(tx_pos & VCHIQ_SLOT_MASK));
730 }
731 
732 static void
733 process_free_data_message(struct vchiq_state *state, u32 *service_found,
734 			  struct vchiq_header *header)
735 {
736 	int msgid = header->msgid;
737 	int port = VCHIQ_MSG_SRCPORT(msgid);
738 	struct vchiq_service_quota *quota = &state->service_quotas[port];
739 	int count;
740 
741 	spin_lock(&quota_spinlock);
742 	count = quota->message_use_count;
743 	if (count > 0)
744 		quota->message_use_count = count - 1;
745 	spin_unlock(&quota_spinlock);
746 
747 	if (count == quota->message_quota) {
748 		/*
749 		 * Signal the service that it
750 		 * has dropped below its quota
751 		 */
752 		complete(&quota->quota_event);
753 	} else if (count == 0) {
754 		vchiq_log_error(vchiq_core_log_level,
755 				"service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
756 				port, quota->message_use_count, header, msgid, header->msgid,
757 				header->size);
758 		WARN(1, "invalid message use count\n");
759 	}
760 	if (!BITSET_IS_SET(service_found, port)) {
761 		/* Set the found bit for this service */
762 		BITSET_SET(service_found, port);
763 
764 		spin_lock(&quota_spinlock);
765 		count = quota->slot_use_count;
766 		if (count > 0)
767 			quota->slot_use_count = count - 1;
768 		spin_unlock(&quota_spinlock);
769 
770 		if (count > 0) {
771 			/*
772 			 * Signal the service in case
773 			 * it has dropped below its quota
774 			 */
775 			complete(&quota->quota_event);
776 			vchiq_log_trace(vchiq_core_log_level, "%d: pfq:%d %x@%pK - slot_use->%d",
777 					state->id, port, header->size, header, count - 1);
778 		} else {
779 			vchiq_log_error(vchiq_core_log_level,
780 					"service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
781 					port, count, header, msgid, header->msgid, header->size);
782 			WARN(1, "bad slot use count\n");
783 		}
784 	}
785 }
786 
787 /* Called by the recycle thread. */
788 static void
789 process_free_queue(struct vchiq_state *state, u32 *service_found,
790 		   size_t length)
791 {
792 	struct vchiq_shared_state *local = state->local;
793 	int slot_queue_available;
794 
795 	/*
796 	 * Find slots which have been freed by the other side, and return them
797 	 * to the available queue.
798 	 */
799 	slot_queue_available = state->slot_queue_available;
800 
801 	/*
802 	 * Use a memory barrier to ensure that any state that may have been
803 	 * modified by another thread is not masked by stale prefetched
804 	 * values.
805 	 */
806 	mb();
807 
808 	while (slot_queue_available != local->slot_queue_recycle) {
809 		unsigned int pos;
810 		int slot_index = local->slot_queue[slot_queue_available &
811 			VCHIQ_SLOT_QUEUE_MASK];
812 		char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
813 		int data_found = 0;
814 
815 		slot_queue_available++;
816 		/*
817 		 * Beware of the address dependency - data is calculated
818 		 * using an index written by the other side.
819 		 */
820 		rmb();
821 
822 		vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x",
823 				state->id, slot_index, data, local->slot_queue_recycle,
824 				slot_queue_available);
825 
826 		/* Initialise the bitmask for services which have used this slot */
827 		memset(service_found, 0, length);
828 
829 		pos = 0;
830 
831 		while (pos < VCHIQ_SLOT_SIZE) {
832 			struct vchiq_header *header =
833 				(struct vchiq_header *)(data + pos);
834 			int msgid = header->msgid;
835 
836 			if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
837 				process_free_data_message(state, service_found,
838 							  header);
839 				data_found = 1;
840 			}
841 
842 			pos += calc_stride(header->size);
843 			if (pos > VCHIQ_SLOT_SIZE) {
844 				vchiq_log_error(vchiq_core_log_level,
845 						"pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
846 						pos, header, msgid, header->msgid, header->size);
847 				WARN(1, "invalid slot position\n");
848 			}
849 		}
850 
851 		if (data_found) {
852 			int count;
853 
854 			spin_lock(&quota_spinlock);
855 			count = state->data_use_count;
856 			if (count > 0)
857 				state->data_use_count = count - 1;
858 			spin_unlock(&quota_spinlock);
859 			if (count == state->data_quota)
860 				complete(&state->data_quota_event);
861 		}
862 
863 		/*
864 		 * Don't allow the slot to be reused until we are no
865 		 * longer interested in it.
866 		 */
867 		mb();
868 
869 		state->slot_queue_available = slot_queue_available;
870 		complete(&state->slot_available_event);
871 	}
872 }
873 
874 static ssize_t
875 memcpy_copy_callback(void *context, void *dest, size_t offset, size_t maxsize)
876 {
877 	memcpy(dest + offset, context + offset, maxsize);
878 	return maxsize;
879 }
880 
881 static ssize_t
882 copy_message_data(ssize_t (*copy_callback)(void *context, void *dest, size_t offset,
883 					   size_t maxsize),
884 	void *context,
885 	void *dest,
886 	size_t size)
887 {
888 	size_t pos = 0;
889 
890 	while (pos < size) {
891 		ssize_t callback_result;
892 		size_t max_bytes = size - pos;
893 
894 		callback_result = copy_callback(context, dest + pos, pos,
895 						max_bytes);
896 
897 		if (callback_result < 0)
898 			return callback_result;
899 
900 		if (!callback_result)
901 			return -EIO;
902 
903 		if (callback_result > max_bytes)
904 			return -EIO;
905 
906 		pos += callback_result;
907 	}
908 
909 	return size;
910 }
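
/*
 * Usage sketch (added): queue_message() below calls this as
 * copy_message_data(copy_callback, context, header->data, size); with
 * memcpy_copy_callback the whole payload is copied from 'context' into the
 * slot in one call and 'size' is returned. A callback may instead copy in
 * smaller chunks and is called repeatedly, while a zero, negative or
 * out-of-range return makes the copy fail with an error.
 */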
911 
912 /* Called by the slot handler and application threads */
913 static int
914 queue_message(struct vchiq_state *state, struct vchiq_service *service,
915 	      int msgid,
916 	      ssize_t (*copy_callback)(void *context, void *dest,
917 				       size_t offset, size_t maxsize),
918 	      void *context, size_t size, int flags)
919 {
920 	struct vchiq_shared_state *local;
921 	struct vchiq_service_quota *quota = NULL;
922 	struct vchiq_header *header;
923 	int type = VCHIQ_MSG_TYPE(msgid);
924 
925 	size_t stride;
926 
927 	local = state->local;
928 
929 	stride = calc_stride(size);
930 
931 	WARN_ON(stride > VCHIQ_SLOT_SIZE);
932 
933 	if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
934 	    mutex_lock_killable(&state->slot_mutex))
935 		return -EAGAIN;
936 
937 	if (type == VCHIQ_MSG_DATA) {
938 		int tx_end_index;
939 
940 		if (!service) {
941 			WARN(1, "%s: service is NULL\n", __func__);
942 			mutex_unlock(&state->slot_mutex);
943 			return -EINVAL;
944 		}
945 
946 		WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
947 				 QMFLAGS_NO_MUTEX_UNLOCK));
948 
949 		if (service->closing) {
950 			/* The service has been closed */
951 			mutex_unlock(&state->slot_mutex);
952 			return -EHOSTDOWN;
953 		}
954 
955 		quota = &state->service_quotas[service->localport];
956 
957 		spin_lock(&quota_spinlock);
958 
959 		/*
960 		 * Ensure this service doesn't use more than its quota of
961 		 * messages or slots
962 		 */
963 		tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
964 
965 		/*
966 		 * Ensure data messages don't use more than their quota of
967 		 * slots
968 		 */
969 		while ((tx_end_index != state->previous_data_index) &&
970 		       (state->data_use_count == state->data_quota)) {
971 			VCHIQ_STATS_INC(state, data_stalls);
972 			spin_unlock(&quota_spinlock);
973 			mutex_unlock(&state->slot_mutex);
974 
975 			if (wait_for_completion_interruptible(&state->data_quota_event))
976 				return -EAGAIN;
977 
978 			mutex_lock(&state->slot_mutex);
979 			spin_lock(&quota_spinlock);
980 			tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
981 			if ((tx_end_index == state->previous_data_index) ||
982 			    (state->data_use_count < state->data_quota)) {
983 				/* Pass the signal on to other waiters */
984 				complete(&state->data_quota_event);
985 				break;
986 			}
987 		}
988 
989 		while ((quota->message_use_count == quota->message_quota) ||
990 		       ((tx_end_index != quota->previous_tx_index) &&
991 			(quota->slot_use_count == quota->slot_quota))) {
992 			spin_unlock(&quota_spinlock);
993 			vchiq_log_trace(vchiq_core_log_level,
994 					"%d: qm:%d %s,%zx - quota stall (msg %d, slot %d)",
995 					state->id, service->localport, msg_type_str(type), size,
996 					quota->message_use_count, quota->slot_use_count);
997 			VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
998 			mutex_unlock(&state->slot_mutex);
999 			if (wait_for_completion_interruptible(&quota->quota_event))
1000 				return -EAGAIN;
1001 			if (service->closing)
1002 				return -EHOSTDOWN;
1003 			if (mutex_lock_killable(&state->slot_mutex))
1004 				return -EAGAIN;
1005 			if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
1006 				/* The service has been closed */
1007 				mutex_unlock(&state->slot_mutex);
1008 				return -EHOSTDOWN;
1009 			}
1010 			spin_lock(&quota_spinlock);
1011 			tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
1012 		}
1013 
1014 		spin_unlock(&quota_spinlock);
1015 	}
1016 
1017 	header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING);
1018 
1019 	if (!header) {
1020 		if (service)
1021 			VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
1022 		/*
1023 		 * In the event of a failure, return the mutex to the
1024 		 * state it was in
1025 		 */
1026 		if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
1027 			mutex_unlock(&state->slot_mutex);
1028 		return -EAGAIN;
1029 	}
1030 
1031 	if (type == VCHIQ_MSG_DATA) {
1032 		ssize_t callback_result;
1033 		int tx_end_index;
1034 		int slot_use_count;
1035 
1036 		vchiq_log_info(vchiq_core_log_level, "%d: qm %s@%pK,%zx (%d->%d)", state->id,
1037 			       msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
1038 			       VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
1039 
1040 		WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
1041 				 QMFLAGS_NO_MUTEX_UNLOCK));
1042 
1043 		callback_result =
1044 			copy_message_data(copy_callback, context,
1045 					  header->data, size);
1046 
1047 		if (callback_result < 0) {
1048 			mutex_unlock(&state->slot_mutex);
1049 			VCHIQ_SERVICE_STATS_INC(service, error_count);
1050 			return -EINVAL;
1051 		}
1052 
1053 		if (SRVTRACE_ENABLED(service,
1054 				     VCHIQ_LOG_INFO))
1055 			vchiq_log_dump_mem("Sent", 0,
1056 					   header->data,
1057 					   min_t(size_t, 16, callback_result));
1058 
1059 		spin_lock(&quota_spinlock);
1060 		quota->message_use_count++;
1061 
1062 		tx_end_index =
1063 			SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
1064 
1065 		/*
1066 		 * If this transmission can't fit in the last slot used by any
1067 		 * service, the data_use_count must be increased.
1068 		 */
1069 		if (tx_end_index != state->previous_data_index) {
1070 			state->previous_data_index = tx_end_index;
1071 			state->data_use_count++;
1072 		}
1073 
1074 		/*
1075 		 * If this isn't the same slot last used by this service,
1076 		 * the service's slot_use_count must be increased.
1077 		 */
1078 		if (tx_end_index != quota->previous_tx_index) {
1079 			quota->previous_tx_index = tx_end_index;
1080 			slot_use_count = ++quota->slot_use_count;
1081 		} else {
1082 			slot_use_count = 0;
1083 		}
1084 
1085 		spin_unlock(&quota_spinlock);
1086 
1087 		if (slot_use_count)
1088 			vchiq_log_trace(vchiq_core_log_level,
1089 					"%d: qm:%d %s,%zx - slot_use->%d (hdr %p)", state->id,
1090 					service->localport, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1091 					size, slot_use_count, header);
1092 
1093 		VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1094 		VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1095 	} else {
1096 		vchiq_log_info(vchiq_core_log_level, "%d: qm %s@%pK,%zx (%d->%d)", state->id,
1097 			       msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
1098 			       VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
1099 		if (size != 0) {
1100 			/*
1101 			 * It is assumed for now that this code path
1102 			 * only happens from calls inside this file.
1103 			 *
1104 			 * External callers are through the vchiq_queue_message
1105 			 * path which always sets the type to be VCHIQ_MSG_DATA
1106 			 *
1107 			 * At first glance this appears to be correct but
1108 			 * more review is needed.
1109 			 */
1110 			copy_message_data(copy_callback, context,
1111 					  header->data, size);
1112 		}
1113 		VCHIQ_STATS_INC(state, ctrl_tx_count);
1114 	}
1115 
1116 	header->msgid = msgid;
1117 	header->size = size;
1118 
1119 	{
1120 		int svc_fourcc;
1121 
1122 		svc_fourcc = service
1123 			? service->base.fourcc
1124 			: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1125 
1126 		vchiq_log_info(SRVTRACE_LEVEL(service),
1127 			       "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
1128 			       msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
1129 			       VCHIQ_FOURCC_AS_4CHARS(svc_fourcc), VCHIQ_MSG_SRCPORT(msgid),
1130 			       VCHIQ_MSG_DSTPORT(msgid), size);
1131 	}
1132 
1133 	/* Make sure the new header is visible to the peer. */
1134 	wmb();
1135 
1136 	/* Make the new tx_pos visible to the peer. */
1137 	local->tx_pos = state->local_tx_pos;
1138 	wmb();
1139 
1140 	if (service && (type == VCHIQ_MSG_CLOSE))
1141 		set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
1142 
1143 	if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK))
1144 		mutex_unlock(&state->slot_mutex);
1145 
1146 	remote_event_signal(&state->remote->trigger);
1147 
1148 	return 0;
1149 }
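
/*
 * Usage sketch (added): control messages in this file are sent with the
 * memcpy callback, e.g. parse_open() below acknowledges an OPEN with
 * queue_message(state, NULL, MAKE_OPENACK(service->localport, remoteport),
 * memcpy_copy_callback, &ack_payload, sizeof(ack_payload), 0), while data
 * messages arrive via the public vchiq_queue_message() path with a
 * caller-supplied copy callback.
 */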
1150 
1151 /* Called by the slot handler and application threads */
1152 static int
1153 queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
1154 		   int msgid,
1155 		   ssize_t (*copy_callback)(void *context, void *dest,
1156 					    size_t offset, size_t maxsize),
1157 		   void *context, int size, int is_blocking)
1158 {
1159 	struct vchiq_shared_state *local;
1160 	struct vchiq_header *header;
1161 	ssize_t callback_result;
1162 
1163 	local = state->local;
1164 
1165 	if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME &&
1166 	    mutex_lock_killable(&state->sync_mutex))
1167 		return -EAGAIN;
1168 
1169 	remote_event_wait(&state->sync_release_event, &local->sync_release);
1170 
1171 	/* Ensure that reads don't overtake the remote_event_wait. */
1172 	rmb();
1173 
1174 	header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
1175 		local->slot_sync);
1176 
1177 	{
1178 		int oldmsgid = header->msgid;
1179 
1180 		if (oldmsgid != VCHIQ_MSGID_PADDING)
1181 			vchiq_log_error(vchiq_core_log_level, "%d: qms - msgid %x, not PADDING",
1182 					state->id, oldmsgid);
1183 	}
1184 
1185 	vchiq_log_info(vchiq_sync_log_level,
1186 		       "%d: qms %s@%pK,%x (%d->%d)", state->id,
1187 		       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1188 		       header, size, VCHIQ_MSG_SRCPORT(msgid),
1189 		       VCHIQ_MSG_DSTPORT(msgid));
1190 
1191 	callback_result =
1192 		copy_message_data(copy_callback, context,
1193 				  header->data, size);
1194 
1195 	if (callback_result < 0) {
1196 		mutex_unlock(&state->slot_mutex);
1197 		VCHIQ_SERVICE_STATS_INC(service, error_count);
1198 		return -EINVAL;
1199 	}
1200 
1201 	if (service) {
1202 		if (SRVTRACE_ENABLED(service,
1203 				     VCHIQ_LOG_INFO))
1204 			vchiq_log_dump_mem("Sent", 0,
1205 					   header->data,
1206 					   min_t(size_t, 16, callback_result));
1207 
1208 		VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1209 		VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1210 	} else {
1211 		VCHIQ_STATS_INC(state, ctrl_tx_count);
1212 	}
1213 
1214 	header->size = size;
1215 	header->msgid = msgid;
1216 
1217 	if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
1218 		int svc_fourcc;
1219 
1220 		svc_fourcc = service
1221 			? service->base.fourcc
1222 			: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1223 
1224 		vchiq_log_trace(vchiq_sync_log_level,
1225 				"Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
1226 				msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
1227 				VCHIQ_FOURCC_AS_4CHARS(svc_fourcc), VCHIQ_MSG_SRCPORT(msgid),
1228 				VCHIQ_MSG_DSTPORT(msgid), size);
1229 	}
1230 
1231 	remote_event_signal(&state->remote->sync_trigger);
1232 
1233 	if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
1234 		mutex_unlock(&state->sync_mutex);
1235 
1236 	return 0;
1237 }
1238 
1239 static inline void
1240 claim_slot(struct vchiq_slot_info *slot)
1241 {
1242 	slot->use_count++;
1243 }
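
/*
 * Descriptive note (added): each received slot is reference-counted.
 * claim_slot() bumps use_count when a message in the slot is handed to a
 * service, and release_slot() below bumps release_count, pushing the slot
 * onto the remote's recycle queue once the two counts match.
 */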
1244 
1245 static void
1246 release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
1247 	     struct vchiq_header *header, struct vchiq_service *service)
1248 {
1249 	mutex_lock(&state->recycle_mutex);
1250 
1251 	if (header) {
1252 		int msgid = header->msgid;
1253 
1254 		if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) || (service && service->closing)) {
1255 			mutex_unlock(&state->recycle_mutex);
1256 			return;
1257 		}
1258 
1259 		/* Rewrite the message header to prevent a double release */
1260 		header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
1261 	}
1262 
1263 	slot_info->release_count++;
1264 
1265 	if (slot_info->release_count == slot_info->use_count) {
1266 		int slot_queue_recycle;
1267 		/* Add to the freed queue */
1268 
1269 		/*
1270 		 * A read barrier is necessary here to prevent speculative
1271 		 * fetches of remote->slot_queue_recycle from overtaking the
1272 		 * mutex.
1273 		 */
1274 		rmb();
1275 
1276 		slot_queue_recycle = state->remote->slot_queue_recycle;
1277 		state->remote->slot_queue[slot_queue_recycle &
1278 			VCHIQ_SLOT_QUEUE_MASK] =
1279 			SLOT_INDEX_FROM_INFO(state, slot_info);
1280 		state->remote->slot_queue_recycle = slot_queue_recycle + 1;
1281 		vchiq_log_info(vchiq_core_log_level, "%d: %s %d - recycle->%x", state->id, __func__,
1282 			       SLOT_INDEX_FROM_INFO(state, slot_info),
1283 			       state->remote->slot_queue_recycle);
1284 
1285 		/*
1286 		 * A write barrier is necessary, but remote_event_signal
1287 		 * contains one.
1288 		 */
1289 		remote_event_signal(&state->remote->recycle);
1290 	}
1291 
1292 	mutex_unlock(&state->recycle_mutex);
1293 }
1294 
1295 static inline enum vchiq_reason
1296 get_bulk_reason(struct vchiq_bulk *bulk)
1297 {
1298 	if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1299 		if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
1300 			return VCHIQ_BULK_TRANSMIT_ABORTED;
1301 
1302 		return VCHIQ_BULK_TRANSMIT_DONE;
1303 	}
1304 
1305 	if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
1306 		return VCHIQ_BULK_RECEIVE_ABORTED;
1307 
1308 	return VCHIQ_BULK_RECEIVE_DONE;
1309 }
1310 
1311 /* Called by the slot handler - don't hold the bulk mutex */
1312 static int
1313 notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
1314 	     int retry_poll)
1315 {
1316 	int status = 0;
1317 
1318 	vchiq_log_trace(vchiq_core_log_level, "%d: nb:%d %cx - p=%x rn=%x r=%x", service->state->id,
1319 			service->localport, (queue == &service->bulk_tx) ? 't' : 'r',
1320 			queue->process, queue->remote_notify, queue->remove);
1321 
1322 	queue->remote_notify = queue->process;
1323 
1324 	while (queue->remove != queue->remote_notify) {
1325 		struct vchiq_bulk *bulk =
1326 			&queue->bulks[BULK_INDEX(queue->remove)];
1327 
1328 		/*
1329 		 * Only generate callbacks for non-dummy bulk
1330 		 * requests, and non-terminated services
1331 		 */
1332 		if (bulk->data && service->instance) {
1333 			if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
1334 				if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1335 					VCHIQ_SERVICE_STATS_INC(service, bulk_tx_count);
1336 					VCHIQ_SERVICE_STATS_ADD(service, bulk_tx_bytes,
1337 								bulk->actual);
1338 				} else {
1339 					VCHIQ_SERVICE_STATS_INC(service, bulk_rx_count);
1340 					VCHIQ_SERVICE_STATS_ADD(service, bulk_rx_bytes,
1341 								bulk->actual);
1342 				}
1343 			} else {
1344 				VCHIQ_SERVICE_STATS_INC(service, bulk_aborted_count);
1345 			}
1346 			if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
1347 				struct bulk_waiter *waiter;
1348 
1349 				spin_lock(&bulk_waiter_spinlock);
1350 				waiter = bulk->userdata;
1351 				if (waiter) {
1352 					waiter->actual = bulk->actual;
1353 					complete(&waiter->event);
1354 				}
1355 				spin_unlock(&bulk_waiter_spinlock);
1356 			} else if (bulk->mode == VCHIQ_BULK_MODE_CALLBACK) {
1357 				enum vchiq_reason reason =
1358 						get_bulk_reason(bulk);
1359 				status = make_service_callback(service, reason, NULL,
1360 							       bulk->userdata);
1361 				if (status == -EAGAIN)
1362 					break;
1363 			}
1364 		}
1365 
1366 		queue->remove++;
1367 		complete(&service->bulk_remove_event);
1368 	}
1369 	if (!retry_poll)
1370 		status = 0;
1371 
1372 	if (status == -EAGAIN)
1373 		request_poll(service->state, service, (queue == &service->bulk_tx) ?
1374 			     VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
1375 
1376 	return status;
1377 }
1378 
1379 static void
1380 poll_services_of_group(struct vchiq_state *state, int group)
1381 {
1382 	u32 flags = atomic_xchg(&state->poll_services[group], 0);
1383 	int i;
1384 
1385 	for (i = 0; flags; i++) {
1386 		struct vchiq_service *service;
1387 		u32 service_flags;
1388 
1389 		if ((flags & BIT(i)) == 0)
1390 			continue;
1391 
1392 		service = find_service_by_port(state, (group << 5) + i);
1393 		flags &= ~BIT(i);
1394 
1395 		if (!service)
1396 			continue;
1397 
1398 		service_flags = atomic_xchg(&service->poll_flags, 0);
1399 		if (service_flags & BIT(VCHIQ_POLL_REMOVE)) {
1400 			vchiq_log_info(vchiq_core_log_level, "%d: ps - remove %d<->%d",
1401 				       state->id, service->localport,
1402 				       service->remoteport);
1403 
1404 			/*
1405 			 * Make it look like a client, because
1406 			 * it must be removed and not left in
1407 			 * the LISTENING state.
1408 			 */
1409 			service->public_fourcc = VCHIQ_FOURCC_INVALID;
1410 
1411 			if (vchiq_close_service_internal(service, NO_CLOSE_RECVD))
1412 				request_poll(state, service, VCHIQ_POLL_REMOVE);
1413 		} else if (service_flags & BIT(VCHIQ_POLL_TERMINATE)) {
1414 			vchiq_log_info(vchiq_core_log_level, "%d: ps - terminate %d<->%d",
1415 				       state->id, service->localport, service->remoteport);
1416 			if (vchiq_close_service_internal(service, NO_CLOSE_RECVD))
1417 				request_poll(state, service, VCHIQ_POLL_TERMINATE);
1418 		}
1419 		if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY))
1420 			notify_bulks(service, &service->bulk_tx, RETRY_POLL);
1421 		if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY))
1422 			notify_bulks(service, &service->bulk_rx, RETRY_POLL);
1423 		vchiq_service_put(service);
1424 	}
1425 }
1426 
1427 /* Called by the slot handler thread */
1428 static void
1429 poll_services(struct vchiq_state *state)
1430 {
1431 	int group;
1432 
1433 	for (group = 0; group < BITSET_SIZE(state->unused_service); group++)
1434 		poll_services_of_group(state, group);
1435 }
1436 
1437 /* Called with the bulk_mutex held */
1438 static void
1439 abort_outstanding_bulks(struct vchiq_service *service,
1440 			struct vchiq_bulk_queue *queue)
1441 {
1442 	int is_tx = (queue == &service->bulk_tx);
1443 
1444 	vchiq_log_trace(vchiq_core_log_level, "%d: aob:%d %cx - li=%x ri=%x p=%x",
1445 			service->state->id, service->localport, is_tx ? 't' : 'r',
1446 			queue->local_insert, queue->remote_insert, queue->process);
1447 
1448 	WARN_ON((int)(queue->local_insert - queue->process) < 0);
1449 	WARN_ON((int)(queue->remote_insert - queue->process) < 0);
1450 
1451 	while ((queue->process != queue->local_insert) ||
1452 	       (queue->process != queue->remote_insert)) {
1453 		struct vchiq_bulk *bulk = &queue->bulks[BULK_INDEX(queue->process)];
1454 
1455 		if (queue->process == queue->remote_insert) {
1456 			/* fabricate a matching dummy bulk */
1457 			bulk->remote_data = NULL;
1458 			bulk->remote_size = 0;
1459 			queue->remote_insert++;
1460 		}
1461 
1462 		if (queue->process != queue->local_insert) {
1463 			vchiq_complete_bulk(service->instance, bulk);
1464 
1465 			vchiq_log_info(SRVTRACE_LEVEL(service),
1466 				       "%s %c%c%c%c d:%d ABORTED - tx len:%d, rx len:%d",
1467 				       is_tx ? "Send Bulk to" : "Recv Bulk from",
1468 				       VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1469 				       service->remoteport, bulk->size, bulk->remote_size);
1470 		} else {
1471 			/* fabricate a matching dummy bulk */
1472 			bulk->data = NULL;
1473 			bulk->size = 0;
1474 			bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
1475 			bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
1476 				VCHIQ_BULK_RECEIVE;
1477 			queue->local_insert++;
1478 		}
1479 
1480 		queue->process++;
1481 	}
1482 }
1483 
1484 static int
1485 parse_open(struct vchiq_state *state, struct vchiq_header *header)
1486 {
1487 	const struct vchiq_open_payload *payload;
1488 	struct vchiq_service *service = NULL;
1489 	int msgid, size;
1490 	unsigned int localport, remoteport, fourcc;
1491 	short version, version_min;
1492 
1493 	msgid = header->msgid;
1494 	size = header->size;
1495 	localport = VCHIQ_MSG_DSTPORT(msgid);
1496 	remoteport = VCHIQ_MSG_SRCPORT(msgid);
1497 	if (size < sizeof(struct vchiq_open_payload))
1498 		goto fail_open;
1499 
1500 	payload = (struct vchiq_open_payload *)header->data;
1501 	fourcc = payload->fourcc;
1502 	vchiq_log_info(vchiq_core_log_level, "%d: prs OPEN@%pK (%d->'%c%c%c%c')",
1503 		       state->id, header, localport, VCHIQ_FOURCC_AS_4CHARS(fourcc));
1504 
1505 	service = get_listening_service(state, fourcc);
1506 	if (!service)
1507 		goto fail_open;
1508 
1509 	/* A matching service exists */
1510 	version = payload->version;
1511 	version_min = payload->version_min;
1512 
1513 	if ((service->version < version_min) || (version < service->version_min)) {
1514 		/* Version mismatch */
1515 		vchiq_loud_error_header();
1516 		vchiq_loud_error("%d: service %d (%c%c%c%c) version mismatch - local (%d, min %d) vs. remote (%d, min %d)",
1517 				 state->id, service->localport, VCHIQ_FOURCC_AS_4CHARS(fourcc),
1518 				 service->version, service->version_min, version, version_min);
1519 		vchiq_loud_error_footer();
1520 		vchiq_service_put(service);
1521 		service = NULL;
1522 		goto fail_open;
1523 	}
1524 	service->peer_version = version;
1525 
1526 	if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
1527 		struct vchiq_openack_payload ack_payload = {
1528 			service->version
1529 		};
1530 		int openack_id = MAKE_OPENACK(service->localport, remoteport);
1531 
1532 		if (state->version_common <
1533 		    VCHIQ_VERSION_SYNCHRONOUS_MODE)
1534 			service->sync = 0;
1535 
1536 		/* Acknowledge the OPEN */
1537 		if (service->sync) {
1538 			if (queue_message_sync(state, NULL, openack_id, memcpy_copy_callback,
1539 					       &ack_payload, sizeof(ack_payload), 0) == -EAGAIN)
1540 				goto bail_not_ready;
1541 
1542 			/* The service is now open */
1543 			set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC);
1544 		} else {
1545 			if (queue_message(state, NULL, openack_id, memcpy_copy_callback,
1546 					  &ack_payload, sizeof(ack_payload), 0) == -EAGAIN)
1547 				goto bail_not_ready;
1548 
1549 			/* The service is now open */
1550 			set_service_state(service, VCHIQ_SRVSTATE_OPEN);
1551 		}
1552 	}
1553 
1554 	/* Success - the message has been dealt with */
1555 	vchiq_service_put(service);
1556 	return 1;
1557 
1558 fail_open:
1559 	/* No available service, or an invalid request - send a CLOSE */
1560 	if (queue_message(state, NULL, MAKE_CLOSE(0, VCHIQ_MSG_SRCPORT(msgid)),
1561 			  NULL, NULL, 0, 0) == -EAGAIN)
1562 		goto bail_not_ready;
1563 
1564 	return 1;
1565 
1566 bail_not_ready:
1567 	if (service)
1568 		vchiq_service_put(service);
1569 
1570 	return 0;
1571 }
1572 
1573 /**
1574  * parse_message() - parses a single message from the rx slot
1575  * @state:  vchiq state struct
1576  * @header: message header
1577  *
1578  * Context: Process context
1579  *
1580  * Return:
1581  * * >= 0     - size of the parsed message payload (without header)
1582  * * -EINVAL  - fatal error occurred, bail out is required
1583  */
1584 static int
1585 parse_message(struct vchiq_state *state, struct vchiq_header *header)
1586 {
1587 	struct vchiq_service *service = NULL;
1588 	unsigned int localport, remoteport;
1589 	int msgid, size, type, ret = -EINVAL;
1590 
1591 	DEBUG_INITIALISE(state->local);
1592 
1593 	DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
1594 	msgid = header->msgid;
1595 	DEBUG_VALUE(PARSE_MSGID, msgid);
1596 	size = header->size;
1597 	type = VCHIQ_MSG_TYPE(msgid);
1598 	localport = VCHIQ_MSG_DSTPORT(msgid);
1599 	remoteport = VCHIQ_MSG_SRCPORT(msgid);
1600 
1601 	if (type != VCHIQ_MSG_DATA)
1602 		VCHIQ_STATS_INC(state, ctrl_rx_count);
1603 
1604 	switch (type) {
1605 	case VCHIQ_MSG_OPENACK:
1606 	case VCHIQ_MSG_CLOSE:
1607 	case VCHIQ_MSG_DATA:
1608 	case VCHIQ_MSG_BULK_RX:
1609 	case VCHIQ_MSG_BULK_TX:
1610 	case VCHIQ_MSG_BULK_RX_DONE:
1611 	case VCHIQ_MSG_BULK_TX_DONE:
1612 		service = find_service_by_port(state, localport);
1613 		if ((!service ||
1614 		     ((service->remoteport != remoteport) &&
1615 		      (service->remoteport != VCHIQ_PORT_FREE))) &&
1616 		    (localport == 0) &&
1617 		    (type == VCHIQ_MSG_CLOSE)) {
1618 			/*
1619 			 * This could be a CLOSE from a client which
1620 			 * hadn't yet received the OPENACK - look for
1621 			 * the connected service
1622 			 */
1623 			if (service)
1624 				vchiq_service_put(service);
1625 			service = get_connected_service(state, remoteport);
1626 			if (service)
1627 				vchiq_log_warning(vchiq_core_log_level,
1628 						  "%d: prs %s@%pK (%d->%d) - found connected service %d",
1629 						  state->id, msg_type_str(type), header,
1630 						  remoteport, localport, service->localport);
1631 		}
1632 
1633 		if (!service) {
1634 			vchiq_log_error(vchiq_core_log_level,
1635 					"%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
1636 					state->id, msg_type_str(type), header, remoteport,
1637 					localport, localport);
1638 			goto skip_message;
1639 		}
1640 		break;
1641 	default:
1642 		break;
1643 	}
1644 
1645 	if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
1646 		int svc_fourcc;
1647 
1648 		svc_fourcc = service
1649 			? service->base.fourcc
1650 			: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1651 		vchiq_log_info(SRVTRACE_LEVEL(service),
1652 			       "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d len:%d",
1653 			       msg_type_str(type), type, VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1654 			       remoteport, localport, size);
1655 		if (size > 0)
1656 			vchiq_log_dump_mem("Rcvd", 0, header->data, min(16, size));
1657 	}
1658 
1659 	if (((unsigned long)header & VCHIQ_SLOT_MASK) +
1660 	    calc_stride(size) > VCHIQ_SLOT_SIZE) {
1661 		vchiq_log_error(vchiq_core_log_level,
1662 				"header %pK (msgid %x) - size %x too big for slot",
1663 				header, (unsigned int)msgid, (unsigned int)size);
1664 		WARN(1, "oversized for slot\n");
1665 	}
1666 
1667 	switch (type) {
1668 	case VCHIQ_MSG_OPEN:
1669 		WARN_ON(VCHIQ_MSG_DSTPORT(msgid));
1670 		if (!parse_open(state, header))
1671 			goto bail_not_ready;
1672 		break;
1673 	case VCHIQ_MSG_OPENACK:
1674 		if (size >= sizeof(struct vchiq_openack_payload)) {
1675 			const struct vchiq_openack_payload *payload =
1676 				(struct vchiq_openack_payload *)
1677 				header->data;
1678 			service->peer_version = payload->version;
1679 		}
1680 		vchiq_log_info(vchiq_core_log_level, "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
1681 			       state->id, header, size, remoteport, localport,
1682 			       service->peer_version);
1683 		if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
1684 			service->remoteport = remoteport;
1685 			set_service_state(service, VCHIQ_SRVSTATE_OPEN);
1686 			complete(&service->remove_event);
1687 		} else {
1688 			vchiq_log_error(vchiq_core_log_level, "OPENACK received in state %s",
1689 					srvstate_names[service->srvstate]);
1690 		}
1691 		break;
1692 	case VCHIQ_MSG_CLOSE:
1693 		WARN_ON(size); /* There should be no data */
1694 
1695 		vchiq_log_info(vchiq_core_log_level, "%d: prs CLOSE@%pK (%d->%d)",
1696 			       state->id, header, remoteport, localport);
1697 
1698 		mark_service_closing_internal(service, 1);
1699 
1700 		if (vchiq_close_service_internal(service, CLOSE_RECVD) == -EAGAIN)
1701 			goto bail_not_ready;
1702 
1703 		vchiq_log_info(vchiq_core_log_level, "Close Service %c%c%c%c s:%u d:%d",
1704 			       VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1705 			       service->localport, service->remoteport);
1706 		break;
1707 	case VCHIQ_MSG_DATA:
1708 		vchiq_log_info(vchiq_core_log_level, "%d: prs DATA@%pK,%x (%d->%d)",
1709 			       state->id, header, size, remoteport, localport);
1710 
1711 		if ((service->remoteport == remoteport) &&
1712 		    (service->srvstate == VCHIQ_SRVSTATE_OPEN)) {
1713 			header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
1714 			claim_slot(state->rx_info);
1715 			DEBUG_TRACE(PARSE_LINE);
1716 			if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
1717 						  NULL) == -EAGAIN) {
1718 				DEBUG_TRACE(PARSE_LINE);
1719 				goto bail_not_ready;
1720 			}
1721 			VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
1722 			VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes, size);
1723 		} else {
1724 			VCHIQ_STATS_INC(state, error_count);
1725 		}
1726 		break;
1727 	case VCHIQ_MSG_CONNECT:
1728 		vchiq_log_info(vchiq_core_log_level, "%d: prs CONNECT@%pK", state->id, header);
1729 		state->version_common =	((struct vchiq_slot_zero *)
1730 					 state->slot_data)->version;
1731 		complete(&state->connect);
1732 		break;
1733 	case VCHIQ_MSG_BULK_RX:
1734 	case VCHIQ_MSG_BULK_TX:
1735 		/*
1736 		 * We should never receive a bulk request from the
1737 		 * other side since we're not set up to perform as the
1738 		 * master.
1739 		 */
1740 		WARN_ON(1);
1741 		break;
1742 	case VCHIQ_MSG_BULK_RX_DONE:
1743 	case VCHIQ_MSG_BULK_TX_DONE:
1744 		if ((service->remoteport == remoteport) &&
1745 		    (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
1746 			struct vchiq_bulk_queue *queue;
1747 			struct vchiq_bulk *bulk;
1748 
1749 			queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
1750 				&service->bulk_rx : &service->bulk_tx;
1751 
1752 			DEBUG_TRACE(PARSE_LINE);
1753 			if (mutex_lock_killable(&service->bulk_mutex)) {
1754 				DEBUG_TRACE(PARSE_LINE);
1755 				goto bail_not_ready;
1756 			}
1757 			if ((int)(queue->remote_insert -
1758 				queue->local_insert) >= 0) {
1759 				vchiq_log_error(vchiq_core_log_level,
1760 						"%d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)",
1761 						state->id, msg_type_str(type), header, remoteport,
1762 						localport, queue->remote_insert,
1763 						queue->local_insert);
1764 				mutex_unlock(&service->bulk_mutex);
1765 				break;
1766 			}
1767 			if (queue->process != queue->remote_insert) {
1768 				pr_err("%s: p %x != ri %x\n",
1769 				       __func__,
1770 				       queue->process,
1771 				       queue->remote_insert);
1772 				mutex_unlock(&service->bulk_mutex);
1773 				goto bail_not_ready;
1774 			}
1775 
1776 			bulk = &queue->bulks[BULK_INDEX(queue->remote_insert)];
1777 			bulk->actual = *(int *)header->data;
1778 			queue->remote_insert++;
1779 
1780 			vchiq_log_info(vchiq_core_log_level, "%d: prs %s@%pK (%d->%d) %x@%pad",
1781 				       state->id, msg_type_str(type), header, remoteport, localport,
1782 				       bulk->actual, &bulk->data);
1783 
1784 			vchiq_log_trace(vchiq_core_log_level, "%d: prs:%d %cx li=%x ri=%x p=%x",
1785 					state->id, localport,
1786 					(type == VCHIQ_MSG_BULK_RX_DONE) ? 'r' : 't',
1787 					queue->local_insert, queue->remote_insert, queue->process);
1788 
1789 			DEBUG_TRACE(PARSE_LINE);
1790 			WARN_ON(queue->process == queue->local_insert);
1791 			vchiq_complete_bulk(service->instance, bulk);
1792 			queue->process++;
1793 			mutex_unlock(&service->bulk_mutex);
1794 			DEBUG_TRACE(PARSE_LINE);
1795 			notify_bulks(service, queue, RETRY_POLL);
1796 			DEBUG_TRACE(PARSE_LINE);
1797 		}
1798 		break;
1799 	case VCHIQ_MSG_PADDING:
1800 		vchiq_log_trace(vchiq_core_log_level, "%d: prs PADDING@%pK,%x",
1801 				state->id, header, size);
1802 		break;
1803 	case VCHIQ_MSG_PAUSE:
1804 		/* If initiated, signal the application thread */
1805 		vchiq_log_trace(vchiq_core_log_level, "%d: prs PAUSE@%pK,%x",
1806 				state->id, header, size);
1807 		if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
1808 			vchiq_log_error(vchiq_core_log_level, "%d: PAUSE received in state PAUSED",
1809 					state->id);
1810 			break;
1811 		}
1812 		if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
1813 			/* Send a PAUSE in response */
1814 			if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0,
1815 					  QMFLAGS_NO_MUTEX_UNLOCK) == -EAGAIN)
1816 				goto bail_not_ready;
1817 		}
1818 		/* At this point slot_mutex is held */
1819 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
1820 		break;
1821 	case VCHIQ_MSG_RESUME:
1822 		vchiq_log_trace(vchiq_core_log_level, "%d: prs RESUME@%pK,%x",
1823 				state->id, header, size);
1824 		/* Release the slot mutex */
1825 		mutex_unlock(&state->slot_mutex);
1826 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1827 		break;
1828 
1829 	case VCHIQ_MSG_REMOTE_USE:
1830 		vchiq_on_remote_use(state);
1831 		break;
1832 	case VCHIQ_MSG_REMOTE_RELEASE:
1833 		vchiq_on_remote_release(state);
1834 		break;
1835 	case VCHIQ_MSG_REMOTE_USE_ACTIVE:
1836 		break;
1837 
1838 	default:
1839 		vchiq_log_error(vchiq_core_log_level, "%d: prs invalid msgid %x@%pK,%x",
1840 				state->id, msgid, header, size);
1841 		WARN(1, "invalid message\n");
1842 		break;
1843 	}
1844 
1845 skip_message:
1846 	ret = size;
1847 
1848 bail_not_ready:
1849 	if (service)
1850 		vchiq_service_put(service);
1851 
1852 	return ret;
1853 }
1854 
1855 /* Called by the slot handler thread */
1856 static void
1857 parse_rx_slots(struct vchiq_state *state)
1858 {
1859 	struct vchiq_shared_state *remote = state->remote;
1860 	int tx_pos;
1861 
1862 	DEBUG_INITIALISE(state->local);
1863 
1864 	tx_pos = remote->tx_pos;
1865 
1866 	while (state->rx_pos != tx_pos) {
1867 		struct vchiq_header *header;
1868 		int size;
1869 
1870 		DEBUG_TRACE(PARSE_LINE);
1871 		if (!state->rx_data) {
1872 			int rx_index;
1873 
1874 			WARN_ON(state->rx_pos & VCHIQ_SLOT_MASK);
1875 			rx_index = remote->slot_queue[
1876 				SLOT_QUEUE_INDEX_FROM_POS_MASKED(state->rx_pos)];
1877 			state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
1878 				rx_index);
1879 			state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
1880 
1881 			/*
1882 			 * Initialise use_count to one, and increment
1883 			 * release_count at the end of the slot to avoid
1884 			 * releasing the slot prematurely.
1885 			 */
1886 			state->rx_info->use_count = 1;
1887 			state->rx_info->release_count = 0;
1888 		}
1889 
1890 		header = (struct vchiq_header *)(state->rx_data +
1891 			(state->rx_pos & VCHIQ_SLOT_MASK));
1892 		size = parse_message(state, header);
1893 		if (size < 0)
1894 			return;
1895 
1896 		state->rx_pos += calc_stride(size);
1897 
1898 		DEBUG_TRACE(PARSE_LINE);
1899 		/*
1900 		 * Perform some housekeeping when the end of the slot is
1901 		 * reached.
1902 		 */
1903 		if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
1904 			/* Remove the extra reference count. */
1905 			release_slot(state, state->rx_info, NULL, NULL);
1906 			state->rx_data = NULL;
1907 		}
1908 	}
1909 }
1910 
1911 /**
1912  * handle_poll() - handle service polling and other rare conditions
1913  * @state:  vchiq state struct
1914  *
1915  * Context: Process context
1916  *
1917  * Return:
1918  * * 0        - poll handled successfully
1919  * * -EAGAIN  - retry later
1920  */
1921 static int
1922 handle_poll(struct vchiq_state *state)
1923 {
1924 	switch (state->conn_state) {
1925 	case VCHIQ_CONNSTATE_CONNECTED:
1926 		/* Poll the services as requested */
1927 		poll_services(state);
1928 		break;
1929 
1930 	case VCHIQ_CONNSTATE_PAUSING:
1931 		if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0,
1932 				  QMFLAGS_NO_MUTEX_UNLOCK) != -EAGAIN) {
1933 			vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSE_SENT);
1934 		} else {
1935 			/* Retry later */
1936 			return -EAGAIN;
1937 		}
1938 		break;
1939 
1940 	case VCHIQ_CONNSTATE_RESUMING:
1941 		if (queue_message(state, NULL, MAKE_RESUME, NULL, NULL, 0,
1942 				  QMFLAGS_NO_MUTEX_LOCK) != -EAGAIN) {
1943 			vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1944 		} else {
1945 			/*
1946 			 * This should really be impossible,
1947 			 * since the PAUSE should have flushed
1948 			 * through outstanding messages.
1949 			 */
1950 			vchiq_log_error(vchiq_core_log_level, "Failed to send RESUME message");
1951 		}
1952 		break;
1953 	default:
1954 		break;
1955 	}
1956 
1957 	return 0;
1958 }
1959 
1960 /* Called by the slot handler thread */
1961 static int
1962 slot_handler_func(void *v)
1963 {
1964 	struct vchiq_state *state = v;
1965 	struct vchiq_shared_state *local = state->local;
1966 
1967 	DEBUG_INITIALISE(local);
1968 
1969 	while (1) {
1970 		DEBUG_COUNT(SLOT_HANDLER_COUNT);
1971 		DEBUG_TRACE(SLOT_HANDLER_LINE);
1972 		remote_event_wait(&state->trigger_event, &local->trigger);
1973 
1974 		/* Ensure that reads don't overtake the remote_event_wait. */
1975 		rmb();
1976 
1977 		DEBUG_TRACE(SLOT_HANDLER_LINE);
1978 		if (state->poll_needed) {
1979 			state->poll_needed = 0;
1980 
1981 			/*
1982 			 * Handle service polling and other rare conditions here
1983 			 * out of the mainline code
1984 			 */
1985 			if (handle_poll(state) == -EAGAIN)
1986 				state->poll_needed = 1;
1987 		}
1988 
1989 		DEBUG_TRACE(SLOT_HANDLER_LINE);
1990 		parse_rx_slots(state);
1991 	}
1992 	return 0;
1993 }
1994 
1995 /* Called by the recycle thread */
1996 static int
1997 recycle_func(void *v)
1998 {
1999 	struct vchiq_state *state = v;
2000 	struct vchiq_shared_state *local = state->local;
2001 	u32 *found;
2002 	size_t length;
2003 
2004 	length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
2005 
2006 	found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
2007 			      GFP_KERNEL);
2008 	if (!found)
2009 		return -ENOMEM;
2010 
2011 	while (1) {
2012 		remote_event_wait(&state->recycle_event, &local->recycle);
2013 
2014 		process_free_queue(state, found, length);
2015 	}
2016 	return 0;
2017 }
2018 
2019 /* Called by the sync thread */
2020 static int
2021 sync_func(void *v)
2022 {
2023 	struct vchiq_state *state = v;
2024 	struct vchiq_shared_state *local = state->local;
2025 	struct vchiq_header *header =
2026 		(struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2027 			state->remote->slot_sync);
2028 
2029 	while (1) {
2030 		struct vchiq_service *service;
2031 		int msgid, size;
2032 		int type;
2033 		unsigned int localport, remoteport;
2034 
2035 		remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
2036 
2037 		/* Ensure that reads don't overtake the remote_event_wait. */
2038 		rmb();
2039 
2040 		msgid = header->msgid;
2041 		size = header->size;
2042 		type = VCHIQ_MSG_TYPE(msgid);
2043 		localport = VCHIQ_MSG_DSTPORT(msgid);
2044 		remoteport = VCHIQ_MSG_SRCPORT(msgid);
2045 
2046 		service = find_service_by_port(state, localport);
2047 
2048 		if (!service) {
2049 			vchiq_log_error(vchiq_sync_log_level,
2050 					"%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
2051 					state->id, msg_type_str(type), header,
2052 					remoteport, localport, localport);
2053 			release_message_sync(state, header);
2054 			continue;
2055 		}
2056 
2057 		if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
2058 			int svc_fourcc;
2059 
2060 			svc_fourcc = service
2061 				? service->base.fourcc
2062 				: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
2063 			vchiq_log_trace(vchiq_sync_log_level,
2064 					"Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
2065 					msg_type_str(type), VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
2066 					remoteport, localport, size);
2067 			if (size > 0)
2068 				vchiq_log_dump_mem("Rcvd", 0, header->data, min(16, size));
2069 		}
2070 
2071 		switch (type) {
2072 		case VCHIQ_MSG_OPENACK:
2073 			if (size >= sizeof(struct vchiq_openack_payload)) {
2074 				const struct vchiq_openack_payload *payload =
2075 					(struct vchiq_openack_payload *)
2076 					header->data;
2077 				service->peer_version = payload->version;
2078 			}
2079 			vchiq_log_info(vchiq_sync_log_level, "%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
2080 				       state->id, header, size, remoteport, localport,
2081 				       service->peer_version);
2082 			if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
2083 				service->remoteport = remoteport;
2084 				set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC);
2085 				service->sync = 1;
2086 				complete(&service->remove_event);
2087 			}
2088 			release_message_sync(state, header);
2089 			break;
2090 
2091 		case VCHIQ_MSG_DATA:
2092 			vchiq_log_trace(vchiq_sync_log_level, "%d: sf DATA@%pK,%x (%d->%d)",
2093 					state->id, header, size, remoteport, localport);
2094 
2095 			if ((service->remoteport == remoteport) &&
2096 			    (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)) {
2097 				if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
2098 							  NULL) == -EAGAIN)
2099 					vchiq_log_error(vchiq_sync_log_level,
2100 							"synchronous callback to service %d returns -EAGAIN",
2101 							localport);
2102 			}
2103 			break;
2104 
2105 		default:
2106 			vchiq_log_error(vchiq_sync_log_level, "%d: sf unexpected msgid %x@%pK,%x",
2107 					state->id, msgid, header, size);
2108 			release_message_sync(state, header);
2109 			break;
2110 		}
2111 
2112 		vchiq_service_put(service);
2113 	}
2114 
2115 	return 0;
2116 }
2117 
2118 inline const char *
2119 get_conn_state_name(enum vchiq_connstate conn_state)
2120 {
2121 	return conn_state_names[conn_state];
2122 }
2123 
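/*
 * Editorial note (not from the original source): a worked example of the
 * slot partitioning performed by vchiq_init_slots() below, assuming a
 * 4 KiB VCHIQ_SLOT_SIZE, a slot-aligned mem_base (mem_align == 0), a
 * mem_size of 64 KiB and a slot zero that fits in a single slot
 * (VCHIQ_SLOT_ZERO_SLOTS == 1):
 *
 *	num_slots        = 64 KiB / 4 KiB          = 16
 *	first_data_slot  = 1     (slot 0 holds struct vchiq_slot_zero)
 *	num_slots       -= 1                       -> 15 data slots
 *
 *	master.slot_sync  = 1
 *	master.slot_first = 2,  master.slot_last = 1 + 15/2 - 1 = 7
 *	slave.slot_sync   = 1 + 15/2 = 8
 *	slave.slot_first  = 9,  slave.slot_last  = 1 + 15 - 1   = 15
 *
 * Each side therefore gets one synchronous slot plus roughly half of the
 * remaining data slots.
 */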
2124 struct vchiq_slot_zero *
2125 vchiq_init_slots(void *mem_base, int mem_size)
2126 {
2127 	int mem_align =
2128 		(int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
2129 	struct vchiq_slot_zero *slot_zero =
2130 		(struct vchiq_slot_zero *)(mem_base + mem_align);
2131 	int num_slots = (mem_size - mem_align) / VCHIQ_SLOT_SIZE;
2132 	int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
2133 
2134 	check_sizes();
2135 
2136 	/* Ensure there is enough memory to run an absolutely minimum system */
2137 	num_slots -= first_data_slot;
2138 
2139 	if (num_slots < 4) {
2140 		vchiq_log_error(vchiq_core_log_level, "%s - insufficient memory %x bytes",
2141 				__func__, mem_size);
2142 		return NULL;
2143 	}
2144 
2145 	memset(slot_zero, 0, sizeof(struct vchiq_slot_zero));
2146 
2147 	slot_zero->magic = VCHIQ_MAGIC;
2148 	slot_zero->version = VCHIQ_VERSION;
2149 	slot_zero->version_min = VCHIQ_VERSION_MIN;
2150 	slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
2151 	slot_zero->slot_size = VCHIQ_SLOT_SIZE;
2152 	slot_zero->max_slots = VCHIQ_MAX_SLOTS;
2153 	slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
2154 
2155 	slot_zero->master.slot_sync = first_data_slot;
2156 	slot_zero->master.slot_first = first_data_slot + 1;
2157 	slot_zero->master.slot_last = first_data_slot + (num_slots / 2) - 1;
2158 	slot_zero->slave.slot_sync = first_data_slot + (num_slots / 2);
2159 	slot_zero->slave.slot_first = first_data_slot + (num_slots / 2) + 1;
2160 	slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
2161 
2162 	return slot_zero;
2163 }
2164 
2165 int
2166 vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, struct device *dev)
2167 {
2168 	struct vchiq_shared_state *local;
2169 	struct vchiq_shared_state *remote;
2170 	char threadname[16];
2171 	int i, ret;
2172 
2173 	local = &slot_zero->slave;
2174 	remote = &slot_zero->master;
2175 
2176 	if (local->initialised) {
2177 		vchiq_loud_error_header();
2178 		if (remote->initialised)
2179 			vchiq_loud_error("local state has already been initialised");
2180 		else
2181 			vchiq_loud_error("master/slave mismatch - two slaves");
2182 		vchiq_loud_error_footer();
2183 		return -EINVAL;
2184 	}
2185 
2186 	memset(state, 0, sizeof(struct vchiq_state));
2187 
2188 	state->dev = dev;
2189 
2190 	/*
2191 	 * initialize shared state pointers
2192 	 */
2193 
2194 	state->local = local;
2195 	state->remote = remote;
2196 	state->slot_data = (struct vchiq_slot *)slot_zero;
2197 
2198 	/*
2199 	 * initialize events and mutexes
2200 	 */
2201 
2202 	init_completion(&state->connect);
2203 	mutex_init(&state->mutex);
2204 	mutex_init(&state->slot_mutex);
2205 	mutex_init(&state->recycle_mutex);
2206 	mutex_init(&state->sync_mutex);
2207 	mutex_init(&state->bulk_transfer_mutex);
2208 
2209 	init_completion(&state->slot_available_event);
2210 	init_completion(&state->slot_remove_event);
2211 	init_completion(&state->data_quota_event);
2212 
2213 	state->slot_queue_available = 0;
2214 
2215 	for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
2216 		struct vchiq_service_quota *quota = &state->service_quotas[i];
2217 		init_completion(&quota->quota_event);
2218 	}
2219 
2220 	for (i = local->slot_first; i <= local->slot_last; i++) {
2221 		local->slot_queue[state->slot_queue_available] = i;
2222 		state->slot_queue_available++;
2223 		complete(&state->slot_available_event);
2224 	}
2225 
2226 	state->default_slot_quota = state->slot_queue_available / 2;
2227 	state->default_message_quota =
2228 		min_t(unsigned short, state->default_slot_quota * 256, ~0);
2229 
2230 	state->previous_data_index = -1;
2231 	state->data_use_count = 0;
2232 	state->data_quota = state->slot_queue_available - 1;
2233 
2234 	remote_event_create(&state->trigger_event, &local->trigger);
2235 	local->tx_pos = 0;
2236 	remote_event_create(&state->recycle_event, &local->recycle);
2237 	local->slot_queue_recycle = state->slot_queue_available;
2238 	remote_event_create(&state->sync_trigger_event, &local->sync_trigger);
2239 	remote_event_create(&state->sync_release_event, &local->sync_release);
2240 
2241 	/* At start-of-day, the slot is empty and available */
2242 	((struct vchiq_header *)
2243 		SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid =
2244 							VCHIQ_MSGID_PADDING;
2245 	remote_event_signal_local(&state->sync_release_event, &local->sync_release);
2246 
2247 	local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
2248 
2249 	ret = vchiq_platform_init_state(state);
2250 	if (ret)
2251 		return ret;
2252 
2253 	/*
2254 	 * bring up slot handler thread
2255 	 */
2256 	snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id);
2257 	state->slot_handler_thread = kthread_create(&slot_handler_func, (void *)state, threadname);
2258 
2259 	if (IS_ERR(state->slot_handler_thread)) {
2260 		vchiq_loud_error_header();
2261 		vchiq_loud_error("couldn't create thread %s", threadname);
2262 		vchiq_loud_error_footer();
2263 		return PTR_ERR(state->slot_handler_thread);
2264 	}
2265 	set_user_nice(state->slot_handler_thread, -19);
2266 
2267 	snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
2268 	state->recycle_thread = kthread_create(&recycle_func, (void *)state, threadname);
2269 	if (IS_ERR(state->recycle_thread)) {
2270 		vchiq_loud_error_header();
2271 		vchiq_loud_error("couldn't create thread %s", threadname);
2272 		vchiq_loud_error_footer();
2273 		ret = PTR_ERR(state->recycle_thread);
2274 		goto fail_free_handler_thread;
2275 	}
2276 	set_user_nice(state->recycle_thread, -19);
2277 
2278 	snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
2279 	state->sync_thread = kthread_create(&sync_func, (void *)state, threadname);
2280 	if (IS_ERR(state->sync_thread)) {
2281 		vchiq_loud_error_header();
2282 		vchiq_loud_error("couldn't create thread %s", threadname);
2283 		vchiq_loud_error_footer();
2284 		ret = PTR_ERR(state->sync_thread);
2285 		goto fail_free_recycle_thread;
2286 	}
2287 	set_user_nice(state->sync_thread, -20);
2288 
2289 	wake_up_process(state->slot_handler_thread);
2290 	wake_up_process(state->recycle_thread);
2291 	wake_up_process(state->sync_thread);
2292 
2293 	/* Indicate readiness to the other side */
2294 	local->initialised = 1;
2295 
2296 	return 0;
2297 
2298 fail_free_recycle_thread:
2299 	kthread_stop(state->recycle_thread);
2300 fail_free_handler_thread:
2301 	kthread_stop(state->slot_handler_thread);
2302 
2303 	return ret;
2304 }
2305 
2306 void vchiq_msg_queue_push(struct vchiq_instance *instance, unsigned int handle,
2307 			  struct vchiq_header *header)
2308 {
2309 	struct vchiq_service *service = find_service_by_handle(instance, handle);
2310 	int pos;
2311 
2312 	if (!service)
2313 		return;
2314 
2315 	while (service->msg_queue_write == service->msg_queue_read +
2316 		VCHIQ_MAX_SLOTS) {
2317 		if (wait_for_completion_interruptible(&service->msg_queue_pop))
2318 			flush_signals(current);
2319 	}
2320 
2321 	pos = service->msg_queue_write & (VCHIQ_MAX_SLOTS - 1);
2322 	service->msg_queue_write++;
2323 	service->msg_queue[pos] = header;
2324 
2325 	complete(&service->msg_queue_push);
2326 }
2327 EXPORT_SYMBOL(vchiq_msg_queue_push);
2328 
2329 struct vchiq_header *vchiq_msg_hold(struct vchiq_instance *instance, unsigned int handle)
2330 {
2331 	struct vchiq_service *service = find_service_by_handle(instance, handle);
2332 	struct vchiq_header *header;
2333 	int pos;
2334 
2335 	if (!service)
2336 		return NULL;
2337 
2338 	if (service->msg_queue_write == service->msg_queue_read)
2339 		return NULL;
2340 
2341 	while (service->msg_queue_write == service->msg_queue_read) {
2342 		if (wait_for_completion_interruptible(&service->msg_queue_push))
2343 			flush_signals(current);
2344 	}
2345 
2346 	pos = service->msg_queue_read & (VCHIQ_MAX_SLOTS - 1);
2347 	service->msg_queue_read++;
2348 	header = service->msg_queue[pos];
2349 
2350 	complete(&service->msg_queue_pop);
2351 
2352 	return header;
2353 }
2354 EXPORT_SYMBOL(vchiq_msg_hold);
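
/*
 * Usage sketch (illustrative, not part of the original source): a service
 * callback can defer message handling by pushing each received header with
 * vchiq_msg_queue_push(), and the owner of the service can later drain the
 * per-service queue. The names my_drain_queue, my_ctx and my_process_msg()
 * are hypothetical.
 *
 *	static void my_drain_queue(struct my_ctx *ctx)
 *	{
 *		struct vchiq_header *header;
 *
 *		// Drain every message currently queued for the service;
 *		// vchiq_msg_hold() returns NULL once the queue is empty.
 *		while ((header = vchiq_msg_hold(ctx->instance, ctx->handle))) {
 *			my_process_msg(header->data, header->size);
 *
 *			// Headers pushed here are still claimed; hand them
 *			// back once processing is complete.
 *			vchiq_release_message(ctx->instance, ctx->handle, header);
 *		}
 *	}
 */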
2355 
2356 static int vchiq_validate_params(const struct vchiq_service_params_kernel *params)
2357 {
2358 	if (!params->callback || !params->fourcc) {
2359 		vchiq_loud_error("Can't add service, invalid params\n");
2360 		return -EINVAL;
2361 	}
2362 
2363 	return 0;
2364 }
2365 
2366 /* Called from application thread when a client or server service is created. */
2367 struct vchiq_service *
2368 vchiq_add_service_internal(struct vchiq_state *state,
2369 			   const struct vchiq_service_params_kernel *params,
2370 			   int srvstate, struct vchiq_instance *instance,
2371 			   void (*userdata_term)(void *userdata))
2372 {
2373 	struct vchiq_service *service;
2374 	struct vchiq_service __rcu **pservice = NULL;
2375 	struct vchiq_service_quota *quota;
2376 	int ret;
2377 	int i;
2378 
2379 	ret = vchiq_validate_params(params);
2380 	if (ret)
2381 		return NULL;
2382 
2383 	service = kzalloc(sizeof(*service), GFP_KERNEL);
2384 	if (!service)
2385 		return service;
2386 
2387 	service->base.fourcc   = params->fourcc;
2388 	service->base.callback = params->callback;
2389 	service->base.userdata = params->userdata;
2390 	service->handle        = VCHIQ_SERVICE_HANDLE_INVALID;
2391 	kref_init(&service->ref_count);
2392 	service->srvstate      = VCHIQ_SRVSTATE_FREE;
2393 	service->userdata_term = userdata_term;
2394 	service->localport     = VCHIQ_PORT_FREE;
2395 	service->remoteport    = VCHIQ_PORT_FREE;
2396 
2397 	service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
2398 		VCHIQ_FOURCC_INVALID : params->fourcc;
2399 	service->auto_close    = 1;
2400 	atomic_set(&service->poll_flags, 0);
2401 	service->version       = params->version;
2402 	service->version_min   = params->version_min;
2403 	service->state         = state;
2404 	service->instance      = instance;
2405 	init_completion(&service->remove_event);
2406 	init_completion(&service->bulk_remove_event);
2407 	init_completion(&service->msg_queue_pop);
2408 	init_completion(&service->msg_queue_push);
2409 	mutex_init(&service->bulk_mutex);
2410 
2411 	/*
2412 	 * Although it is perfectly possible to use a spinlock
2413 	 * to protect the creation of services, it is overkill as it
2414 	 * disables interrupts while the array is searched.
2415 	 * The only danger is of another thread trying to create a
2416 	 * service - service deletion is safe.
2417 	 * Therefore it is preferable to use state->mutex which,
2418 	 * although slower to claim, doesn't block interrupts while
2419 	 * it is held.
2420 	 */
2421 
2422 	mutex_lock(&state->mutex);
2423 
2424 	/* Prepare to use a previously unused service */
2425 	if (state->unused_service < VCHIQ_MAX_SERVICES)
2426 		pservice = &state->services[state->unused_service];
2427 
2428 	if (srvstate == VCHIQ_SRVSTATE_OPENING) {
2429 		for (i = 0; i < state->unused_service; i++) {
2430 			if (!rcu_access_pointer(state->services[i])) {
2431 				pservice = &state->services[i];
2432 				break;
2433 			}
2434 		}
2435 	} else {
2436 		rcu_read_lock();
2437 		for (i = (state->unused_service - 1); i >= 0; i--) {
2438 			struct vchiq_service *srv;
2439 
2440 			srv = rcu_dereference(state->services[i]);
2441 			if (!srv) {
2442 				pservice = &state->services[i];
2443 			} else if ((srv->public_fourcc == params->fourcc) &&
2444 				   ((srv->instance != instance) ||
2445 				   (srv->base.callback != params->callback))) {
2446 				/*
2447 				 * There is another server using this
2448 				 * fourcc which doesn't match.
2449 				 */
2450 				pservice = NULL;
2451 				break;
2452 			}
2453 		}
2454 		rcu_read_unlock();
2455 	}
2456 
2457 	if (pservice) {
2458 		service->localport = (pservice - state->services);
2459 		if (!handle_seq)
2460 			handle_seq = VCHIQ_MAX_STATES *
2461 				 VCHIQ_MAX_SERVICES;
2462 		service->handle = handle_seq |
2463 			(state->id * VCHIQ_MAX_SERVICES) |
2464 			service->localport;
2465 		handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
2466 		rcu_assign_pointer(*pservice, service);
2467 		if (pservice == &state->services[state->unused_service])
2468 			state->unused_service++;
2469 	}
2470 
2471 	mutex_unlock(&state->mutex);
2472 
2473 	if (!pservice) {
2474 		kfree(service);
2475 		return NULL;
2476 	}
2477 
2478 	quota = &state->service_quotas[service->localport];
2479 	quota->slot_quota = state->default_slot_quota;
2480 	quota->message_quota = state->default_message_quota;
2481 	if (quota->slot_use_count == 0)
2482 		quota->previous_tx_index =
2483 			SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
2484 			- 1;
2485 
2486 	/* Bring this service online */
2487 	set_service_state(service, srvstate);
2488 
2489 	vchiq_log_info(vchiq_core_msg_log_level, "%s Service %c%c%c%c SrcPort:%d",
2490 		       (srvstate == VCHIQ_SRVSTATE_OPENING) ? "Open" : "Add",
2491 		       VCHIQ_FOURCC_AS_4CHARS(params->fourcc), service->localport);
2492 
2493 	/* Don't unlock the service - leave it with a ref_count of 1. */
2494 
2495 	return service;
2496 }
2497 
2498 int
2499 vchiq_open_service_internal(struct vchiq_service *service, int client_id)
2500 {
2501 	struct vchiq_open_payload payload = {
2502 		service->base.fourcc,
2503 		client_id,
2504 		service->version,
2505 		service->version_min
2506 	};
2507 	int status = 0;
2508 
2509 	service->client_id = client_id;
2510 	vchiq_use_service_internal(service);
2511 	status = queue_message(service->state,
2512 			       NULL, MAKE_OPEN(service->localport),
2513 			       memcpy_copy_callback,
2514 			       &payload,
2515 			       sizeof(payload),
2516 			       QMFLAGS_IS_BLOCKING);
2517 
2518 	if (status)
2519 		return status;
2520 
2521 	/* Wait for the ACK/NAK */
2522 	if (wait_for_completion_interruptible(&service->remove_event)) {
2523 		status = -EAGAIN;
2524 		vchiq_release_service_internal(service);
2525 	} else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
2526 		   (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
2527 		if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
2528 			vchiq_log_error(vchiq_core_log_level,
2529 					"%d: osi - srvstate = %s (ref %u)",
2530 					service->state->id,
2531 					srvstate_names[service->srvstate],
2532 					kref_read(&service->ref_count));
2533 		status = -EINVAL;
2534 		VCHIQ_SERVICE_STATS_INC(service, error_count);
2535 		vchiq_release_service_internal(service);
2536 	}
2537 
2538 	return status;
2539 }
2540 
2541 static void
2542 release_service_messages(struct vchiq_service *service)
2543 {
2544 	struct vchiq_state *state = service->state;
2545 	int slot_last = state->remote->slot_last;
2546 	int i;
2547 
2548 	/* Release any claimed messages aimed at this service */
2549 
2550 	if (service->sync) {
2551 		struct vchiq_header *header =
2552 			(struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2553 						state->remote->slot_sync);
2554 		if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
2555 			release_message_sync(state, header);
2556 
2557 		return;
2558 	}
2559 
2560 	for (i = state->remote->slot_first; i <= slot_last; i++) {
2561 		struct vchiq_slot_info *slot_info =
2562 			SLOT_INFO_FROM_INDEX(state, i);
2563 		unsigned int pos, end;
2564 		char *data;
2565 
2566 		if (slot_info->release_count == slot_info->use_count)
2567 			continue;
2568 
2569 		data = (char *)SLOT_DATA_FROM_INDEX(state, i);
2570 		end = VCHIQ_SLOT_SIZE;
2571 		if (data == state->rx_data)
2572 			/*
2573 			 * This buffer is still being read from - stop
2574 			 * at the current read position
2575 			 */
2576 			end = state->rx_pos & VCHIQ_SLOT_MASK;
2577 
2578 		pos = 0;
2579 
2580 		while (pos < end) {
2581 			struct vchiq_header *header =
2582 				(struct vchiq_header *)(data + pos);
2583 			int msgid = header->msgid;
2584 			int port = VCHIQ_MSG_DSTPORT(msgid);
2585 
2586 			if ((port == service->localport) && (msgid & VCHIQ_MSGID_CLAIMED)) {
2587 				vchiq_log_info(vchiq_core_log_level, "  fsi - hdr %pK", header);
2588 				release_slot(state, slot_info, header, NULL);
2589 			}
2590 			pos += calc_stride(header->size);
2591 			if (pos > VCHIQ_SLOT_SIZE) {
2592 				vchiq_log_error(vchiq_core_log_level,
2593 						"fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
2594 						pos, header, msgid, header->msgid, header->size);
2595 				WARN(1, "invalid slot position\n");
2596 			}
2597 		}
2598 	}
2599 }
2600 
2601 static int
2602 do_abort_bulks(struct vchiq_service *service)
2603 {
2604 	int status;
2605 
2606 	/* Abort any outstanding bulk transfers */
2607 	if (mutex_lock_killable(&service->bulk_mutex))
2608 		return 0;
2609 	abort_outstanding_bulks(service, &service->bulk_tx);
2610 	abort_outstanding_bulks(service, &service->bulk_rx);
2611 	mutex_unlock(&service->bulk_mutex);
2612 
2613 	status = notify_bulks(service, &service->bulk_tx, NO_RETRY_POLL);
2614 	if (status)
2615 		return 0;
2616 
2617 	status = notify_bulks(service, &service->bulk_rx, NO_RETRY_POLL);
2618 	return !status;
2619 }
2620 
2621 static int
2622 close_service_complete(struct vchiq_service *service, int failstate)
2623 {
2624 	int status;
2625 	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2626 	int newstate;
2627 
2628 	switch (service->srvstate) {
2629 	case VCHIQ_SRVSTATE_OPEN:
2630 	case VCHIQ_SRVSTATE_CLOSESENT:
2631 	case VCHIQ_SRVSTATE_CLOSERECVD:
2632 		if (is_server) {
2633 			if (service->auto_close) {
2634 				service->client_id = 0;
2635 				service->remoteport = VCHIQ_PORT_FREE;
2636 				newstate = VCHIQ_SRVSTATE_LISTENING;
2637 			} else {
2638 				newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
2639 			}
2640 		} else {
2641 			newstate = VCHIQ_SRVSTATE_CLOSED;
2642 		}
2643 		set_service_state(service, newstate);
2644 		break;
2645 	case VCHIQ_SRVSTATE_LISTENING:
2646 		break;
2647 	default:
2648 		vchiq_log_error(vchiq_core_log_level, "%s(%x) called in state %s", __func__,
2649 				service->handle, srvstate_names[service->srvstate]);
2650 		WARN(1, "%s in unexpected state\n", __func__);
2651 		return -EINVAL;
2652 	}
2653 
2654 	status = make_service_callback(service, VCHIQ_SERVICE_CLOSED, NULL, NULL);
2655 
2656 	if (status != -EAGAIN) {
2657 		int uc = service->service_use_count;
2658 		int i;
2659 		/* Complete the close process */
2660 		for (i = 0; i < uc; i++)
2661 			/*
2662 			 * cater for cases where close is forced and the
2663 			 * client may not close all its handles
2664 			 */
2665 			vchiq_release_service_internal(service);
2666 
2667 		service->client_id = 0;
2668 		service->remoteport = VCHIQ_PORT_FREE;
2669 
2670 		if (service->srvstate == VCHIQ_SRVSTATE_CLOSED) {
2671 			vchiq_free_service_internal(service);
2672 		} else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
2673 			if (is_server)
2674 				service->closing = 0;
2675 
2676 			complete(&service->remove_event);
2677 		}
2678 	} else {
2679 		set_service_state(service, failstate);
2680 	}
2681 
2682 	return status;
2683 }
2684 
2685 /* Called by the slot handler */
2686 int
2687 vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
2688 {
2689 	struct vchiq_state *state = service->state;
2690 	int status = 0;
2691 	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2692 	int close_id = MAKE_CLOSE(service->localport,
2693 				  VCHIQ_MSG_DSTPORT(service->remoteport));
2694 
2695 	vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)", service->state->id,
2696 		       service->localport, close_recvd, srvstate_names[service->srvstate]);
2697 
2698 	switch (service->srvstate) {
2699 	case VCHIQ_SRVSTATE_CLOSED:
2700 	case VCHIQ_SRVSTATE_HIDDEN:
2701 	case VCHIQ_SRVSTATE_LISTENING:
2702 	case VCHIQ_SRVSTATE_CLOSEWAIT:
2703 		if (close_recvd) {
2704 			vchiq_log_error(vchiq_core_log_level, "%s(1) called in state %s",
2705 					__func__, srvstate_names[service->srvstate]);
2706 		} else if (is_server) {
2707 			if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
2708 				status = -EINVAL;
2709 			} else {
2710 				service->client_id = 0;
2711 				service->remoteport = VCHIQ_PORT_FREE;
2712 				if (service->srvstate == VCHIQ_SRVSTATE_CLOSEWAIT)
2713 					set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
2714 			}
2715 			complete(&service->remove_event);
2716 		} else {
2717 			vchiq_free_service_internal(service);
2718 		}
2719 		break;
2720 	case VCHIQ_SRVSTATE_OPENING:
2721 		if (close_recvd) {
2722 			/* The open was rejected - tell the user */
2723 			set_service_state(service, VCHIQ_SRVSTATE_CLOSEWAIT);
2724 			complete(&service->remove_event);
2725 		} else {
2726 			/* Shutdown mid-open - let the other side know */
2727 			status = queue_message(state, service, close_id, NULL, NULL, 0, 0);
2728 		}
2729 		break;
2730 
2731 	case VCHIQ_SRVSTATE_OPENSYNC:
2732 		mutex_lock(&state->sync_mutex);
2733 		fallthrough;
2734 	case VCHIQ_SRVSTATE_OPEN:
2735 		if (close_recvd) {
2736 			if (!do_abort_bulks(service))
2737 				status = -EAGAIN;
2738 		}
2739 
2740 		release_service_messages(service);
2741 
2742 		if (!status)
2743 			status = queue_message(state, service, close_id, NULL,
2744 					       NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
2745 
2746 		if (status) {
2747 			if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)
2748 				mutex_unlock(&state->sync_mutex);
2749 			break;
2750 		}
2751 
2752 		if (!close_recvd) {
2753 			/* Change the state while the mutex is still held */
2754 			set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
2755 			mutex_unlock(&state->slot_mutex);
2756 			if (service->sync)
2757 				mutex_unlock(&state->sync_mutex);
2758 			break;
2759 		}
2760 
2761 		/* Change the state while the mutex is still held */
2762 		set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
2763 		mutex_unlock(&state->slot_mutex);
2764 		if (service->sync)
2765 			mutex_unlock(&state->sync_mutex);
2766 
2767 		status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
2768 		break;
2769 
2770 	case VCHIQ_SRVSTATE_CLOSESENT:
2771 		if (!close_recvd)
2772 			/* This happens when a process is killed mid-close */
2773 			break;
2774 
2775 		if (!do_abort_bulks(service)) {
2776 			status = -EAGAIN;
2777 			break;
2778 		}
2779 
2780 		if (!status)
2781 			status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
2782 		break;
2783 
2784 	case VCHIQ_SRVSTATE_CLOSERECVD:
2785 		if (!close_recvd && is_server)
2786 			/* Force into LISTENING mode */
2787 			set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
2788 		status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
2789 		break;
2790 
2791 	default:
2792 		vchiq_log_error(vchiq_core_log_level, "%s(%d) called in state %s", __func__,
2793 				close_recvd, srvstate_names[service->srvstate]);
2794 		break;
2795 	}
2796 
2797 	return status;
2798 }
2799 
2800 /* Called from the application process upon process death */
2801 void
2802 vchiq_terminate_service_internal(struct vchiq_service *service)
2803 {
2804 	struct vchiq_state *state = service->state;
2805 
2806 	vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)", state->id,
2807 		       service->localport, service->remoteport);
2808 
2809 	mark_service_closing(service);
2810 
2811 	/* Mark the service for removal by the slot handler */
2812 	request_poll(state, service, VCHIQ_POLL_REMOVE);
2813 }
2814 
2815 /* Called from the slot handler */
2816 void
2817 vchiq_free_service_internal(struct vchiq_service *service)
2818 {
2819 	struct vchiq_state *state = service->state;
2820 
2821 	vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)", state->id, service->localport);
2822 
2823 	switch (service->srvstate) {
2824 	case VCHIQ_SRVSTATE_OPENING:
2825 	case VCHIQ_SRVSTATE_CLOSED:
2826 	case VCHIQ_SRVSTATE_HIDDEN:
2827 	case VCHIQ_SRVSTATE_LISTENING:
2828 	case VCHIQ_SRVSTATE_CLOSEWAIT:
2829 		break;
2830 	default:
2831 		vchiq_log_error(vchiq_core_log_level, "%d: fsi - (%d) in state %s", state->id,
2832 				service->localport, srvstate_names[service->srvstate]);
2833 		return;
2834 	}
2835 
2836 	set_service_state(service, VCHIQ_SRVSTATE_FREE);
2837 
2838 	complete(&service->remove_event);
2839 
2840 	/* Release the initial lock */
2841 	vchiq_service_put(service);
2842 }
2843 
2844 int
2845 vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2846 {
2847 	struct vchiq_service *service;
2848 	int i;
2849 
2850 	/* Find all services registered to this client and enable them. */
2851 	i = 0;
2852 	while ((service = next_service_by_instance(state, instance, &i)) != NULL) {
2853 		if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
2854 			set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
2855 		vchiq_service_put(service);
2856 	}
2857 
2858 	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
2859 		if (queue_message(state, NULL, MAKE_CONNECT, NULL, NULL, 0,
2860 				  QMFLAGS_IS_BLOCKING) == -EAGAIN)
2861 			return -EAGAIN;
2862 
2863 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
2864 	}
2865 
2866 	if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
2867 		if (wait_for_completion_interruptible(&state->connect))
2868 			return -EAGAIN;
2869 
2870 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
2871 		complete(&state->connect);
2872 	}
2873 
2874 	return 0;
2875 }
2876 
2877 void
2878 vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2879 {
2880 	struct vchiq_service *service;
2881 	int i;
2882 
2883 	/* Find all services registered to this client and remove them. */
2884 	i = 0;
2885 	while ((service = next_service_by_instance(state, instance, &i)) != NULL) {
2886 		(void)vchiq_remove_service(instance, service->handle);
2887 		vchiq_service_put(service);
2888 	}
2889 }
2890 
2891 int
2892 vchiq_close_service(struct vchiq_instance *instance, unsigned int handle)
2893 {
2894 	/* Unregister the service */
2895 	struct vchiq_service *service = find_service_by_handle(instance, handle);
2896 	int status = 0;
2897 
2898 	if (!service)
2899 		return -EINVAL;
2900 
2901 	vchiq_log_info(vchiq_core_log_level, "%d: close_service:%d",
2902 		       service->state->id, service->localport);
2903 
2904 	if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2905 	    (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2906 	    (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
2907 		vchiq_service_put(service);
2908 		return -EINVAL;
2909 	}
2910 
2911 	mark_service_closing(service);
2912 
2913 	if (current == service->state->slot_handler_thread) {
2914 		status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
2915 		WARN_ON(status == -EAGAIN);
2916 	} else {
2917 		/* Mark the service for termination by the slot handler */
2918 		request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
2919 	}
2920 
2921 	while (1) {
2922 		if (wait_for_completion_interruptible(&service->remove_event)) {
2923 			status = -EAGAIN;
2924 			break;
2925 		}
2926 
2927 		if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2928 		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2929 		    (service->srvstate == VCHIQ_SRVSTATE_OPEN))
2930 			break;
2931 
2932 		vchiq_log_warning(vchiq_core_log_level,
2933 				  "%d: close_service:%d - waiting in state %s",
2934 				  service->state->id, service->localport,
2935 				  srvstate_names[service->srvstate]);
2936 	}
2937 
2938 	if (!status &&
2939 	    (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
2940 	    (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
2941 		status = -EINVAL;
2942 
2943 	vchiq_service_put(service);
2944 
2945 	return status;
2946 }
2947 EXPORT_SYMBOL(vchiq_close_service);
2948 
2949 int
2950 vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle)
2951 {
2952 	/* Unregister the service */
2953 	struct vchiq_service *service = find_service_by_handle(instance, handle);
2954 	int status = 0;
2955 
2956 	if (!service)
2957 		return -EINVAL;
2958 
2959 	vchiq_log_info(vchiq_core_log_level, "%d: remove_service:%d",
2960 		       service->state->id, service->localport);
2961 
2962 	if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
2963 		vchiq_service_put(service);
2964 		return -EINVAL;
2965 	}
2966 
2967 	mark_service_closing(service);
2968 
2969 	if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
2970 	    (current == service->state->slot_handler_thread)) {
2971 		/*
2972 		 * Make it look like a client, because it must be removed and
2973 		 * not left in the LISTENING state.
2974 		 */
2975 		service->public_fourcc = VCHIQ_FOURCC_INVALID;
2976 
2977 		status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
2978 		WARN_ON(status == -EAGAIN);
2979 	} else {
2980 		/* Mark the service for removal by the slot handler */
2981 		request_poll(service->state, service, VCHIQ_POLL_REMOVE);
2982 	}
2983 	while (1) {
2984 		if (wait_for_completion_interruptible(&service->remove_event)) {
2985 			status = -EAGAIN;
2986 			break;
2987 		}
2988 
2989 		if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2990 		    (service->srvstate == VCHIQ_SRVSTATE_OPEN))
2991 			break;
2992 
2993 		vchiq_log_warning(vchiq_core_log_level,
2994 				  "%d: remove_service:%d - waiting in state %s",
2995 				  service->state->id, service->localport,
2996 				  srvstate_names[service->srvstate]);
2997 	}
2998 
2999 	if (!status && (service->srvstate != VCHIQ_SRVSTATE_FREE))
3000 		status = -EINVAL;
3001 
3002 	vchiq_service_put(service);
3003 
3004 	return status;
3005 }
3006 
3007 /*
3008  * This function may be called by kernel threads or user threads.
3009  * User threads may receive -EAGAIN to indicate that a signal has been
3010  * received and the call should be retried after being returned to user
3011  * context.
3012  * When called in blocking mode, the userdata field points to a bulk_waiter
3013  * structure.
3014  */
3015 int vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
3016 			void *offset, void __user *uoffset, int size, void *userdata,
3017 			enum vchiq_bulk_mode mode, enum vchiq_bulk_dir dir)
3018 {
3019 	struct vchiq_service *service = find_service_by_handle(instance, handle);
3020 	struct vchiq_bulk_queue *queue;
3021 	struct vchiq_bulk *bulk;
3022 	struct vchiq_state *state;
3023 	struct bulk_waiter *bulk_waiter = NULL;
3024 	const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
3025 	const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
3026 		VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
3027 	int status = -EINVAL;
3028 	int payload[2];
3029 
3030 	if (!service)
3031 		goto error_exit;
3032 
3033 	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3034 		goto error_exit;
3035 
3036 	if (!offset && !uoffset)
3037 		goto error_exit;
3038 
3039 	if (vchiq_check_service(service))
3040 		goto error_exit;
3041 
3042 	switch (mode) {
3043 	case VCHIQ_BULK_MODE_NOCALLBACK:
3044 	case VCHIQ_BULK_MODE_CALLBACK:
3045 		break;
3046 	case VCHIQ_BULK_MODE_BLOCKING:
3047 		bulk_waiter = userdata;
3048 		init_completion(&bulk_waiter->event);
3049 		bulk_waiter->actual = 0;
3050 		bulk_waiter->bulk = NULL;
3051 		break;
3052 	case VCHIQ_BULK_MODE_WAITING:
3053 		bulk_waiter = userdata;
3054 		bulk = bulk_waiter->bulk;
3055 		goto waiting;
3056 	default:
3057 		goto error_exit;
3058 	}
3059 
3060 	state = service->state;
3061 
3062 	queue = (dir == VCHIQ_BULK_TRANSMIT) ?
3063 		&service->bulk_tx : &service->bulk_rx;
3064 
3065 	if (mutex_lock_killable(&service->bulk_mutex)) {
3066 		status = -EAGAIN;
3067 		goto error_exit;
3068 	}
3069 
3070 	if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
3071 		VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
3072 		do {
3073 			mutex_unlock(&service->bulk_mutex);
3074 			if (wait_for_completion_interruptible(&service->bulk_remove_event)) {
3075 				status = -EAGAIN;
3076 				goto error_exit;
3077 			}
3078 			if (mutex_lock_killable(&service->bulk_mutex)) {
3079 				status = -EAGAIN;
3080 				goto error_exit;
3081 			}
3082 		} while (queue->local_insert == queue->remove +
3083 				VCHIQ_NUM_SERVICE_BULKS);
3084 	}
3085 
3086 	bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
3087 
3088 	bulk->mode = mode;
3089 	bulk->dir = dir;
3090 	bulk->userdata = userdata;
3091 	bulk->size = size;
3092 	bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
3093 
3094 	if (vchiq_prepare_bulk_data(instance, bulk, offset, uoffset, size, dir))
3095 		goto unlock_error_exit;
3096 
3097 	/*
3098 	 * Ensure that the bulk data record is visible to the peer
3099 	 * before proceeding.
3100 	 */
3101 	wmb();
3102 
3103 	vchiq_log_info(vchiq_core_log_level, "%d: bt (%d->%d) %cx %x@%pad %pK",
3104 		       state->id, service->localport, service->remoteport,
3105 		       dir_char, size, &bulk->data, userdata);
3106 
3107 	/*
3108 	 * The slot mutex must be held when the service is being closed, so
3109 	 * claim it here to ensure a close is not in progress
3110 	 */
3111 	if (mutex_lock_killable(&state->slot_mutex)) {
3112 		status = -EAGAIN;
3113 		goto cancel_bulk_error_exit;
3114 	}
3115 
3116 	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3117 		goto unlock_both_error_exit;
3118 
3119 	payload[0] = lower_32_bits(bulk->data);
3120 	payload[1] = bulk->size;
3121 	status = queue_message(state,
3122 			       NULL,
3123 			       VCHIQ_MAKE_MSG(dir_msgtype,
3124 					      service->localport,
3125 					      service->remoteport),
3126 			       memcpy_copy_callback,
3127 			       &payload,
3128 			       sizeof(payload),
3129 			       QMFLAGS_IS_BLOCKING |
3130 			       QMFLAGS_NO_MUTEX_LOCK |
3131 			       QMFLAGS_NO_MUTEX_UNLOCK);
3132 	if (status)
3133 		goto unlock_both_error_exit;
3134 
3135 	queue->local_insert++;
3136 
3137 	mutex_unlock(&state->slot_mutex);
3138 	mutex_unlock(&service->bulk_mutex);
3139 
3140 	vchiq_log_trace(vchiq_core_log_level, "%d: bt:%d %cx li=%x ri=%x p=%x",
3141 			state->id, service->localport, dir_char, queue->local_insert,
3142 			queue->remote_insert, queue->process);
3143 
3144 waiting:
3145 	vchiq_service_put(service);
3146 
3147 	status = 0;
3148 
3149 	if (bulk_waiter) {
3150 		bulk_waiter->bulk = bulk;
3151 		if (wait_for_completion_interruptible(&bulk_waiter->event))
3152 			status = -EAGAIN;
3153 		else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
3154 			status = -EINVAL;
3155 	}
3156 
3157 	return status;
3158 
3159 unlock_both_error_exit:
3160 	mutex_unlock(&state->slot_mutex);
3161 cancel_bulk_error_exit:
3162 	vchiq_complete_bulk(service->instance, bulk);
3163 unlock_error_exit:
3164 	mutex_unlock(&service->bulk_mutex);
3165 
3166 error_exit:
3167 	if (service)
3168 		vchiq_service_put(service);
3169 	return status;
3170 }
3171 
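/*
 * Usage sketch (illustrative, not part of the original source): a blocking
 * bulk transmit from kernel context, using a stack-allocated bulk_waiter as
 * described in the comment above vchiq_bulk_transfer(). The variables
 * instance, handle, buf and len are assumed to come from the caller.
 *
 *	struct bulk_waiter waiter = {};
 *	int ret;
 *
 *	ret = vchiq_bulk_transfer(instance, handle, buf, NULL, len, &waiter,
 *				  VCHIQ_BULK_MODE_BLOCKING,
 *				  VCHIQ_BULK_TRANSMIT);
 *	if (ret == -EAGAIN) {
 *		// Interrupted by a signal. The transfer may still be pending;
 *		// it can be waited for again by calling vchiq_bulk_transfer()
 *		// with VCHIQ_BULK_MODE_WAITING and the same waiter.
 *	}
 */
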
3172 int
3173 vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle,
3174 		    ssize_t (*copy_callback)(void *context, void *dest,
3175 					     size_t offset, size_t maxsize),
3176 		    void *context,
3177 		    size_t size)
3178 {
3179 	struct vchiq_service *service = find_service_by_handle(instance, handle);
3180 	int status = -EINVAL;
3181 	int data_id;
3182 
3183 	if (!service)
3184 		goto error_exit;
3185 
3186 	if (vchiq_check_service(service))
3187 		goto error_exit;
3188 
3189 	if (!size) {
3190 		VCHIQ_SERVICE_STATS_INC(service, error_count);
3191 		goto error_exit;
3192 	}
3193 
3194 	if (size > VCHIQ_MAX_MSG_SIZE) {
3195 		VCHIQ_SERVICE_STATS_INC(service, error_count);
3196 		goto error_exit;
3197 	}
3198 
3199 	data_id = MAKE_DATA(service->localport, service->remoteport);
3200 
3201 	switch (service->srvstate) {
3202 	case VCHIQ_SRVSTATE_OPEN:
3203 		status = queue_message(service->state, service, data_id,
3204 				       copy_callback, context, size, 1);
3205 		break;
3206 	case VCHIQ_SRVSTATE_OPENSYNC:
3207 		status = queue_message_sync(service->state, service, data_id,
3208 					    copy_callback, context, size, 1);
3209 		break;
3210 	default:
3211 		status = -EINVAL;
3212 		break;
3213 	}
3214 
3215 error_exit:
3216 	if (service)
3217 		vchiq_service_put(service);
3218 
3219 	return status;
3220 }
3221 
3222 int vchiq_queue_kernel_message(struct vchiq_instance *instance, unsigned int handle, void *data,
3223 			       unsigned int size)
3224 {
3225 	int status;
3226 
3227 	while (1) {
3228 		status = vchiq_queue_message(instance, handle, memcpy_copy_callback,
3229 					     data, size);
3230 
3231 		/*
3232 		 * vchiq_queue_message() may return -EAGAIN, so we need to
3233 		 * implement a retry mechanism since this function is supposed
3234 		 * to block until queued
3235 		 */
3236 		if (status != -EAGAIN)
3237 			break;
3238 
3239 		msleep(1);
3240 	}
3241 
3242 	return status;
3243 }
3244 EXPORT_SYMBOL(vchiq_queue_kernel_message);
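
/*
 * Usage sketch (illustrative, not part of the original source): sending a
 * small control structure to an open service from kernel context. The type
 * struct my_request and the variables instance/handle are hypothetical.
 *
 *	struct my_request req = { .cmd = 1 };
 *	int ret;
 *
 *	// Blocks (retrying internally on -EAGAIN) until the message is queued.
 *	ret = vchiq_queue_kernel_message(instance, handle, &req, sizeof(req));
 *	if (ret)
 *		pr_err("failed to queue message: %d\n", ret);
 */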
3245 
3246 void
3247 vchiq_release_message(struct vchiq_instance *instance, unsigned int handle,
3248 		      struct vchiq_header *header)
3249 {
3250 	struct vchiq_service *service = find_service_by_handle(instance, handle);
3251 	struct vchiq_shared_state *remote;
3252 	struct vchiq_state *state;
3253 	int slot_index;
3254 
3255 	if (!service)
3256 		return;
3257 
3258 	state = service->state;
3259 	remote = state->remote;
3260 
3261 	slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
3262 
3263 	if ((slot_index >= remote->slot_first) &&
3264 	    (slot_index <= remote->slot_last)) {
3265 		int msgid = header->msgid;
3266 
3267 		if (msgid & VCHIQ_MSGID_CLAIMED) {
3268 			struct vchiq_slot_info *slot_info =
3269 				SLOT_INFO_FROM_INDEX(state, slot_index);
3270 
3271 			release_slot(state, slot_info, header, service);
3272 		}
3273 	} else if (slot_index == remote->slot_sync) {
3274 		release_message_sync(state, header);
3275 	}
3276 
3277 	vchiq_service_put(service);
3278 }
3279 EXPORT_SYMBOL(vchiq_release_message);
3280 
3281 static void
3282 release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
3283 {
3284 	header->msgid = VCHIQ_MSGID_PADDING;
3285 	remote_event_signal(&state->remote->sync_release);
3286 }
3287 
3288 int
3289 vchiq_get_peer_version(struct vchiq_instance *instance, unsigned int handle, short *peer_version)
3290 {
3291 	int status = -EINVAL;
3292 	struct vchiq_service *service = find_service_by_handle(instance, handle);
3293 
3294 	if (!service)
3295 		goto exit;
3296 
3297 	if (vchiq_check_service(service))
3298 		goto exit;
3299 
3300 	if (!peer_version)
3301 		goto exit;
3302 
3303 	*peer_version = service->peer_version;
3304 	status = 0;
3305 
3306 exit:
3307 	if (service)
3308 		vchiq_service_put(service);
3309 	return status;
3310 }
3311 EXPORT_SYMBOL(vchiq_get_peer_version);
3312 
3313 void vchiq_get_config(struct vchiq_config *config)
3314 {
3315 	config->max_msg_size           = VCHIQ_MAX_MSG_SIZE;
3316 	config->bulk_threshold         = VCHIQ_MAX_MSG_SIZE;
3317 	config->max_outstanding_bulks  = VCHIQ_NUM_SERVICE_BULKS;
3318 	config->max_services           = VCHIQ_MAX_SERVICES;
3319 	config->version                = VCHIQ_VERSION;
3320 	config->version_min            = VCHIQ_VERSION_MIN;
3321 }
3322 
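/*
 * Adjust a per-service option. Quota updates are only accepted while the
 * new limit is not below the current use count; if both quotas are then
 * satisfied, quota_event is completed to wake any waiter that stalled on
 * the old limit.
 */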
3323 int
3324 vchiq_set_service_option(struct vchiq_instance *instance, unsigned int handle,
3325 			 enum vchiq_service_option option, int value)
3326 {
3327 	struct vchiq_service *service = find_service_by_handle(instance, handle);
3328 	struct vchiq_service_quota *quota;
3329 	int ret = -EINVAL;
3330 
3331 	if (!service)
3332 		return -EINVAL;
3333 
3334 	switch (option) {
3335 	case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
3336 		service->auto_close = value;
3337 		ret = 0;
3338 		break;
3339 
3340 	case VCHIQ_SERVICE_OPTION_SLOT_QUOTA:
3341 		quota = &service->state->service_quotas[service->localport];
3342 		if (value == 0)
3343 			value = service->state->default_slot_quota;
3344 		if ((value >= quota->slot_use_count) &&
3345 		    (value < (unsigned short)~0)) {
3346 			quota->slot_quota = value;
3347 			if ((value >= quota->slot_use_count) &&
3348 			    (quota->message_quota >= quota->message_use_count))
3349 				/*
3350 				 * Signal the service that it may have
3351 				 * dropped below its quota
3352 				 */
3353 				complete(&quota->quota_event);
3354 			ret = 0;
3355 		}
3356 		break;
3357 
3358 	case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA:
3359 		quota = &service->state->service_quotas[service->localport];
3360 		if (value == 0)
3361 			value = service->state->default_message_quota;
3362 		if ((value >= quota->message_use_count) &&
3363 		    (value < (unsigned short)~0)) {
3364 			quota->message_quota = value;
3365 			if ((value >= quota->message_use_count) &&
3366 			    (quota->slot_quota >= quota->slot_use_count))
3367 				/*
3368 				 * Signal the service that it may have
3369 				 * dropped below its quota
3370 				 */
3371 				complete(&quota->quota_event);
3372 			ret = 0;
3373 		}
3374 		break;
3375 
3376 	case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
3377 		if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3378 		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING)) {
3379 			service->sync = value;
3380 			ret = 0;
3381 		}
3382 		break;
3383 
3384 	case VCHIQ_SERVICE_OPTION_TRACE:
3385 		service->trace = value;
3386 		ret = 0;
3387 		break;
3388 
3389 	default:
3390 		break;
3391 	}
3392 	vchiq_service_put(service);
3393 
3394 	return ret;
3395 }
3396 
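/*
 * Dump one side of the shared state: its slot range and transmit position,
 * any slots that are still claimed, and the debug counters published by
 * that side.
 */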
3397 static int
3398 vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
3399 			struct vchiq_shared_state *shared, const char *label)
3400 {
3401 	static const char *const debug_names[] = {
3402 		"<entries>",
3403 		"SLOT_HANDLER_COUNT",
3404 		"SLOT_HANDLER_LINE",
3405 		"PARSE_LINE",
3406 		"PARSE_HEADER",
3407 		"PARSE_MSGID",
3408 		"AWAIT_COMPLETION_LINE",
3409 		"DEQUEUE_MESSAGE_LINE",
3410 		"SERVICE_CALLBACK_LINE",
3411 		"MSG_QUEUE_FULL_COUNT",
3412 		"COMPLETION_QUEUE_FULL_COUNT"
3413 	};
3414 	int i;
3415 	char buf[80];
3416 	int len;
3417 	int err;
3418 
3419 	len = scnprintf(buf, sizeof(buf), "  %s: slots %d-%d tx_pos=%x recycle=%x",
3420 			label, shared->slot_first, shared->slot_last,
3421 			shared->tx_pos, shared->slot_queue_recycle);
3422 	err = vchiq_dump(dump_context, buf, len + 1);
3423 	if (err)
3424 		return err;
3425 
3426 	len = scnprintf(buf, sizeof(buf), "    Slots claimed:");
3427 	err = vchiq_dump(dump_context, buf, len + 1);
3428 	if (err)
3429 		return err;
3430 
3431 	for (i = shared->slot_first; i <= shared->slot_last; i++) {
3432 		struct vchiq_slot_info slot_info =
3433 						*SLOT_INFO_FROM_INDEX(state, i);
3434 		if (slot_info.use_count != slot_info.release_count) {
3435 			len = scnprintf(buf, sizeof(buf), "      %d: %d/%d", i, slot_info.use_count,
3436 					slot_info.release_count);
3437 			err = vchiq_dump(dump_context, buf, len + 1);
3438 			if (err)
3439 				return err;
3440 		}
3441 	}
3442 
3443 	for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
3444 		len = scnprintf(buf, sizeof(buf), "    DEBUG: %s = %d(%x)",
3445 				debug_names[i], shared->debug[i], shared->debug[i]);
3446 		err = vchiq_dump(dump_context, buf, len + 1);
3447 		if (err)
3448 			return err;
3449 	}
3450 	return 0;
3451 }
3452 
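/*
 * Dump the overall connection: state id and connection state, slot
 * positions, optional statistics, both shared-state halves, the platform
 * state and every allocated service.
 */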
3453 int vchiq_dump_state(void *dump_context, struct vchiq_state *state)
3454 {
3455 	char buf[80];
3456 	int len;
3457 	int i;
3458 	int err;
3459 
3460 	len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id,
3461 			conn_state_names[state->conn_state]);
3462 	err = vchiq_dump(dump_context, buf, len + 1);
3463 	if (err)
3464 		return err;
3465 
3466 	len = scnprintf(buf, sizeof(buf), "  tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
3467 			state->local->tx_pos,
3468 			state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
3469 			state->rx_pos,
3470 			state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
3471 	err = vchiq_dump(dump_context, buf, len + 1);
3472 	if (err)
3473 		return err;
3474 
3475 	len = scnprintf(buf, sizeof(buf), "  Version: %d (min %d)",
3476 			VCHIQ_VERSION, VCHIQ_VERSION_MIN);
3477 	err = vchiq_dump(dump_context, buf, len + 1);
3478 	if (err)
3479 		return err;
3480 
3481 	if (VCHIQ_ENABLE_STATS) {
3482 		len = scnprintf(buf, sizeof(buf),
3483 				"  Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d",
3484 				state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
3485 				state->stats.error_count);
3486 		err = vchiq_dump(dump_context, buf, len + 1);
3487 		if (err)
3488 			return err;
3489 	}
3490 
3491 	len = scnprintf(buf, sizeof(buf),
3492 			"  Slots: %d available (%d data), %d recyclable, %d stalls (%d data)",
3493 			((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
3494 			state->local_tx_pos) / VCHIQ_SLOT_SIZE,
3495 			state->data_quota - state->data_use_count,
3496 			state->local->slot_queue_recycle - state->slot_queue_available,
3497 			state->stats.slot_stalls, state->stats.data_stalls);
3498 	err = vchiq_dump(dump_context, buf, len + 1);
3499 	if (err)
3500 		return err;
3501 
3502 	err = vchiq_dump_platform_state(dump_context);
3503 	if (err)
3504 		return err;
3505 
3506 	err = vchiq_dump_shared_state(dump_context,
3507 				      state,
3508 				      state->local,
3509 				      "Local");
3510 	if (err)
3511 		return err;
3512 	err = vchiq_dump_shared_state(dump_context,
3513 				      state,
3514 				      state->remote,
3515 				      "Remote");
3516 	if (err)
3517 		return err;
3518 
3519 	err = vchiq_dump_platform_instances(dump_context);
3520 	if (err)
3521 		return err;
3522 
3523 	for (i = 0; i < state->unused_service; i++) {
3524 		struct vchiq_service *service = find_service_by_port(state, i);
3525 
3526 		if (service) {
3527 			err = vchiq_dump_service_state(dump_context, service);
3528 			vchiq_service_put(service);
3529 			if (err)
3530 				return err;
3531 		}
3532 	}
3533 	return 0;
3534 }
3535 
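/*
 * Dump a single service: its state and reference count, the remote port,
 * message/slot quota usage, pending bulk transfers and (if enabled) the
 * per-service statistics.
 */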
3536 int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
3537 {
3538 	char buf[80];
3539 	int len;
3540 	int err;
3541 	unsigned int ref_count;
3542 
3543 	/* Don't include the reference just taken */
3544 	ref_count = kref_read(&service->ref_count) - 1;
3545 	len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
3546 			service->localport, srvstate_names[service->srvstate],
3547 			ref_count);
3548 
3549 	if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
3550 		char remoteport[30];
3551 		struct vchiq_service_quota *quota =
3552 			&service->state->service_quotas[service->localport];
3553 		int fourcc = service->base.fourcc;
3554 		int tx_pending, rx_pending;
3555 
3556 		if (service->remoteport != VCHIQ_PORT_FREE) {
3557 			int len2 = scnprintf(remoteport, sizeof(remoteport),
3558 				"%u", service->remoteport);
3559 
3560 			if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
3561 				scnprintf(remoteport + len2, sizeof(remoteport) - len2,
3562 					  " (client %x)", service->client_id);
3563 		} else {
3564 			strscpy(remoteport, "n/a", sizeof(remoteport));
3565 		}
3566 
3567 		len += scnprintf(buf + len, sizeof(buf) - len,
3568 				 " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
3569 				 VCHIQ_FOURCC_AS_4CHARS(fourcc), remoteport,
3570 				 quota->message_use_count, quota->message_quota,
3571 				 quota->slot_use_count, quota->slot_quota);
3572 
3573 		err = vchiq_dump(dump_context, buf, len + 1);
3574 		if (err)
3575 			return err;
3576 
3577 		tx_pending = service->bulk_tx.local_insert -
3578 			service->bulk_tx.remote_insert;
3579 
3580 		rx_pending = service->bulk_rx.local_insert -
3581 			service->bulk_rx.remote_insert;
3582 
3583 		len = scnprintf(buf, sizeof(buf),
3584 				"  Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)",
3585 				tx_pending,
3586 				tx_pending ?
3587 				service->bulk_tx.bulks[BULK_INDEX(service->bulk_tx.remove)].size :
3588 				0, rx_pending, rx_pending ?
3589 				service->bulk_rx.bulks[BULK_INDEX(service->bulk_rx.remove)].size :
3590 				0);
3591 
3592 		if (VCHIQ_ENABLE_STATS) {
3593 			err = vchiq_dump(dump_context, buf, len + 1);
3594 			if (err)
3595 				return err;
3596 
3597 			len = scnprintf(buf, sizeof(buf),
3598 					"  Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
3599 					service->stats.ctrl_tx_count, service->stats.ctrl_tx_bytes,
3600 					service->stats.ctrl_rx_count, service->stats.ctrl_rx_bytes);
3601 			err = vchiq_dump(dump_context, buf, len + 1);
3602 			if (err)
3603 				return err;
3604 
3605 			len = scnprintf(buf, sizeof(buf),
3606 					"  Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
3607 					service->stats.bulk_tx_count, service->stats.bulk_tx_bytes,
3608 					service->stats.bulk_rx_count, service->stats.bulk_rx_bytes);
3609 			err = vchiq_dump(dump_context, buf, len + 1);
3610 			if (err)
3611 				return err;
3612 
3613 			len = scnprintf(buf, sizeof(buf),
3614 					"  %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors",
3615 					service->stats.quota_stalls, service->stats.slot_stalls,
3616 					service->stats.bulk_stalls,
3617 					service->stats.bulk_aborted_count,
3618 					service->stats.error_count);
3619 		}
3620 	}
3621 
3622 	err = vchiq_dump(dump_context, buf, len + 1);
3623 	if (err)
3624 		return err;
3625 
3626 	if (service->srvstate != VCHIQ_SRVSTATE_FREE)
3627 		err = vchiq_dump_platform_service_state(dump_context, service);
3628 	return err;
3629 }
3630 
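/*
 * vchiq_loud_error_header()/vchiq_loud_error_footer() bracket important
 * error reports with banner lines so they stand out in the log.
 */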
3631 void
3632 vchiq_loud_error_header(void)
3633 {
3634 	vchiq_log_error(vchiq_core_log_level,
3635 			"============================================================================");
3636 	vchiq_log_error(vchiq_core_log_level,
3637 			"============================================================================");
3638 	vchiq_log_error(vchiq_core_log_level, "=====");
3639 }
3640 
3641 void
3642 vchiq_loud_error_footer(void)
3643 {
3644 	vchiq_log_error(vchiq_core_log_level, "=====");
3645 	vchiq_log_error(vchiq_core_log_level,
3646 			"============================================================================");
3647 	vchiq_log_error(vchiq_core_log_level,
3648 			"============================================================================");
3649 }
3650 
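/* Queue a REMOTE_USE control message; fails with -ENOTCONN if disconnected. */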
3651 int vchiq_send_remote_use(struct vchiq_state *state)
3652 {
3653 	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
3654 		return -ENOTCONN;
3655 
3656 	return queue_message(state, NULL, MAKE_REMOTE_USE, NULL, NULL, 0, 0);
3657 }
3658 
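/* Queue a REMOTE_USE_ACTIVE control message; fails with -ENOTCONN if disconnected. */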
3659 int vchiq_send_remote_use_active(struct vchiq_state *state)
3660 {
3661 	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
3662 		return -ENOTCONN;
3663 
3664 	return queue_message(state, NULL, MAKE_REMOTE_USE_ACTIVE,
3665 			     NULL, NULL, 0, 0);
3666 }
3667 
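/*
 * Trace-log a hex/ASCII dump of a memory region, 16 bytes per line,
 * prefixed with the given label (if any) and the supplied address.
 */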
3668 void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem, size_t num_bytes)
3669 {
3670 	const u8 *mem = void_mem;
3671 	size_t offset;
3672 	char line_buf[100];
3673 	char *s;
3674 
3675 	while (num_bytes > 0) {
3676 		s = line_buf;
3677 
3678 		for (offset = 0; offset < 16; offset++) {
3679 			if (offset < num_bytes)
3680 				s += scnprintf(s, 4, "%02x ", mem[offset]);
3681 			else
3682 				s += scnprintf(s, 4, "   ");
3683 		}
3684 
3685 		for (offset = 0; offset < 16; offset++) {
3686 			if (offset < num_bytes) {
3687 				u8 ch = mem[offset];
3688 
3689 				if ((ch < ' ') || (ch > '~'))
3690 					ch = '.';
3691 				*s++ = (char)ch;
3692 			}
3693 		}
3694 		*s++ = '\0';
3695 
3696 		if (label && (*label != '\0'))
3697 			vchiq_log_trace(VCHIQ_LOG_TRACE, "%s: %08x: %s", label, addr, line_buf);
3698 		else
3699 			vchiq_log_trace(VCHIQ_LOG_TRACE, "%08x: %s", addr, line_buf);
3700 
3701 		addr += 16;
3702 		mem += 16;
3703 		if (num_bytes > 16)
3704 			num_bytes -= 16;
3705 		else
3706 			num_bytes = 0;
3707 	}
3708 }
3709