1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
3 
4 #include <linux/types.h>
5 #include <linux/completion.h>
6 #include <linux/mutex.h>
7 #include <linux/bitops.h>
8 #include <linux/kthread.h>
9 #include <linux/wait.h>
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/kref.h>
13 #include <linux/rcupdate.h>
14 #include <linux/sched/signal.h>
15 
16 #include "vchiq_core.h"
17 
18 #define VCHIQ_SLOT_HANDLER_STACK 8192
19 
20 #define VCHIQ_MSG_PADDING            0  /* -                                 */
21 #define VCHIQ_MSG_CONNECT            1  /* -                                 */
22 #define VCHIQ_MSG_OPEN               2  /* + (srcport, -), fourcc, client_id */
23 #define VCHIQ_MSG_OPENACK            3  /* + (srcport, dstport)              */
24 #define VCHIQ_MSG_CLOSE              4  /* + (srcport, dstport)              */
25 #define VCHIQ_MSG_DATA               5  /* + (srcport, dstport)              */
26 #define VCHIQ_MSG_BULK_RX            6  /* + (srcport, dstport), data, size  */
27 #define VCHIQ_MSG_BULK_TX            7  /* + (srcport, dstport), data, size  */
28 #define VCHIQ_MSG_BULK_RX_DONE       8  /* + (srcport, dstport), actual      */
29 #define VCHIQ_MSG_BULK_TX_DONE       9  /* + (srcport, dstport), actual      */
30 #define VCHIQ_MSG_PAUSE             10  /* -                                 */
31 #define VCHIQ_MSG_RESUME            11  /* -                                 */
32 #define VCHIQ_MSG_REMOTE_USE        12  /* -                                 */
33 #define VCHIQ_MSG_REMOTE_RELEASE    13  /* -                                 */
34 #define VCHIQ_MSG_REMOTE_USE_ACTIVE 14  /* -                                 */
35 
36 #define TYPE_SHIFT 24
37 
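/*
 * A message id packs three fields into 32 bits:
 *   bits 31..24 - message type (VCHIQ_MSG_*)
 *   bits 23..12 - source port
 *   bits 11..0  - destination port
 * e.g. VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA, 3, 7) == (5 << 24) | (3 << 12) | 7
 *                                           == 0x05003007
 */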
38 #define VCHIQ_PORT_MAX                 (VCHIQ_MAX_SERVICES - 1)
39 #define VCHIQ_PORT_FREE                0x1000
40 #define VCHIQ_PORT_IS_VALID(port)      ((port) < VCHIQ_PORT_FREE)
41 #define VCHIQ_MAKE_MSG(type, srcport, dstport) \
42 	(((type) << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
43 #define VCHIQ_MSG_TYPE(msgid)          ((unsigned int)(msgid) >> TYPE_SHIFT)
44 #define VCHIQ_MSG_SRCPORT(msgid) \
45 	(unsigned short)(((unsigned int)(msgid) >> 12) & 0xfff)
46 #define VCHIQ_MSG_DSTPORT(msgid) \
47 	((unsigned short)(msgid) & 0xfff)
48 
49 #define MAKE_CONNECT			(VCHIQ_MSG_CONNECT << TYPE_SHIFT)
50 #define MAKE_OPEN(srcport) \
51 	((VCHIQ_MSG_OPEN << TYPE_SHIFT) | ((srcport) << 12))
52 #define MAKE_OPENACK(srcport, dstport) \
53 	((VCHIQ_MSG_OPENACK << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
54 #define MAKE_CLOSE(srcport, dstport) \
55 	((VCHIQ_MSG_CLOSE << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
56 #define MAKE_DATA(srcport, dstport) \
57 	((VCHIQ_MSG_DATA << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
58 #define MAKE_PAUSE			(VCHIQ_MSG_PAUSE << TYPE_SHIFT)
59 #define MAKE_RESUME			(VCHIQ_MSG_RESUME << TYPE_SHIFT)
60 #define MAKE_REMOTE_USE			(VCHIQ_MSG_REMOTE_USE << TYPE_SHIFT)
61 #define MAKE_REMOTE_USE_ACTIVE		(VCHIQ_MSG_REMOTE_USE_ACTIVE << TYPE_SHIFT)
62 
63 /* Ensure the fields are wide enough */
static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX)) == 0);
static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
static_assert((unsigned int)VCHIQ_PORT_MAX < (unsigned int)VCHIQ_PORT_FREE);
69 
70 #define VCHIQ_MSGID_PADDING            VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
71 #define VCHIQ_MSGID_CLAIMED            0x40000000
72 
73 #define VCHIQ_FOURCC_INVALID           0x00000000
74 #define VCHIQ_FOURCC_IS_LEGAL(fourcc)  ((fourcc) != VCHIQ_FOURCC_INVALID)
75 
76 #define VCHIQ_BULK_ACTUAL_ABORTED -1
77 
78 #if VCHIQ_ENABLE_STATS
#define VCHIQ_STATS_INC(state, stat) (state->stats.stat++)
#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats.stat++)
#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
	(service->stats.stat += (addend))
83 #else
84 #define VCHIQ_STATS_INC(state, stat) ((void)0)
85 #define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
86 #define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
87 #endif
88 
89 #define HANDLE_STATE_SHIFT 12
90 
91 #define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
92 #define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
93 #define SLOT_INDEX_FROM_DATA(state, data) \
94 	(((unsigned int)((char *)data - (char *)state->slot_data)) / \
95 	VCHIQ_SLOT_SIZE)
96 #define SLOT_INDEX_FROM_INFO(state, info) \
97 	((unsigned int)(info - state->slot_info))
98 #define SLOT_QUEUE_INDEX_FROM_POS(pos) \
99 	((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
100 #define SLOT_QUEUE_INDEX_FROM_POS_MASKED(pos) \
101 	(SLOT_QUEUE_INDEX_FROM_POS(pos) & VCHIQ_SLOT_QUEUE_MASK)
102 
103 #define BULK_INDEX(x) ((x) & (VCHIQ_NUM_SERVICE_BULKS - 1))
104 
105 #define SRVTRACE_LEVEL(srv) \
106 	(((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level)
107 #define SRVTRACE_ENABLED(srv, lev) \
108 	(((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev)))
109 
110 #define NO_CLOSE_RECVD	0
111 #define CLOSE_RECVD	1
112 
113 #define NO_RETRY_POLL	0
114 #define RETRY_POLL	1
115 
116 struct vchiq_open_payload {
117 	int fourcc;
118 	int client_id;
119 	short version;
120 	short version_min;
121 };
122 
123 struct vchiq_openack_payload {
124 	short version;
125 };
126 
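/*
 * Flags for queue_message(): IS_BLOCKING lets reserve_space() wait for a
 * free slot; NO_MUTEX_LOCK means the caller already holds slot_mutex, so
 * don't take it; NO_MUTEX_UNLOCK leaves slot_mutex held on return (used
 * when sending PAUSE, where the mutex must stay held until resume).
 */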
127 enum {
128 	QMFLAGS_IS_BLOCKING     = BIT(0),
129 	QMFLAGS_NO_MUTEX_LOCK   = BIT(1),
130 	QMFLAGS_NO_MUTEX_UNLOCK = BIT(2)
131 };
132 
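/*
 * Per-service poll requests, serviced by poll_services_of_group():
 * REMOVE and TERMINATE invoke vchiq_close_service_internal() (re-queueing
 * the poll if it must be retried), while TXNOTIFY and RXNOTIFY re-run
 * notify_bulks() on the corresponding bulk queue.
 */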
133 enum {
134 	VCHIQ_POLL_TERMINATE,
135 	VCHIQ_POLL_REMOVE,
136 	VCHIQ_POLL_TXNOTIFY,
137 	VCHIQ_POLL_RXNOTIFY,
138 	VCHIQ_POLL_COUNT
139 };
140 
141 /* we require this for consistency between endpoints */
142 static_assert(sizeof(struct vchiq_header) == 8);
143 static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
144 
145 static inline void check_sizes(void)
146 {
147 	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_SLOT_SIZE);
148 	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS);
149 	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS_PER_SIDE);
150 	BUILD_BUG_ON_NOT_POWER_OF_2(sizeof(struct vchiq_header));
151 	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_CURRENT_BULKS);
152 	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_SERVICE_BULKS);
153 	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SERVICES);
154 }
155 
156 /* Run time control of log level, based on KERN_XXX level. */
157 int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
158 int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
159 int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
160 
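/*
 * bulk_waiter_spinlock guards the bulk->userdata waiter handoff in
 * notify_bulks(); quota_spinlock serialises the per-service and global
 * message/slot use counts against the recycle thread.
 */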
161 DEFINE_SPINLOCK(bulk_waiter_spinlock);
162 static DEFINE_SPINLOCK(quota_spinlock);
163 
164 struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES];
165 static unsigned int handle_seq;
166 
167 static const char *const srvstate_names[] = {
168 	"FREE",
169 	"HIDDEN",
170 	"LISTENING",
171 	"OPENING",
172 	"OPEN",
173 	"OPENSYNC",
174 	"CLOSESENT",
175 	"CLOSERECVD",
176 	"CLOSEWAIT",
177 	"CLOSED"
178 };
179 
180 static const char *const reason_names[] = {
181 	"SERVICE_OPENED",
182 	"SERVICE_CLOSED",
183 	"MESSAGE_AVAILABLE",
184 	"BULK_TRANSMIT_DONE",
185 	"BULK_RECEIVE_DONE",
186 	"BULK_TRANSMIT_ABORTED",
187 	"BULK_RECEIVE_ABORTED"
188 };
189 
190 static const char *const conn_state_names[] = {
191 	"DISCONNECTED",
192 	"CONNECTING",
193 	"CONNECTED",
194 	"PAUSING",
195 	"PAUSE_SENT",
196 	"PAUSED",
197 	"RESUMING",
198 	"PAUSE_TIMEOUT",
199 	"RESUME_TIMEOUT"
200 };
201 
202 static void
203 release_message_sync(struct vchiq_state *state, struct vchiq_header *header);
204 
205 static const char *msg_type_str(unsigned int msg_type)
206 {
207 	switch (msg_type) {
208 	case VCHIQ_MSG_PADDING:       return "PADDING";
209 	case VCHIQ_MSG_CONNECT:       return "CONNECT";
210 	case VCHIQ_MSG_OPEN:          return "OPEN";
211 	case VCHIQ_MSG_OPENACK:       return "OPENACK";
212 	case VCHIQ_MSG_CLOSE:         return "CLOSE";
213 	case VCHIQ_MSG_DATA:          return "DATA";
214 	case VCHIQ_MSG_BULK_RX:       return "BULK_RX";
215 	case VCHIQ_MSG_BULK_TX:       return "BULK_TX";
216 	case VCHIQ_MSG_BULK_RX_DONE:  return "BULK_RX_DONE";
217 	case VCHIQ_MSG_BULK_TX_DONE:  return "BULK_TX_DONE";
218 	case VCHIQ_MSG_PAUSE:         return "PAUSE";
219 	case VCHIQ_MSG_RESUME:        return "RESUME";
220 	case VCHIQ_MSG_REMOTE_USE:    return "REMOTE_USE";
221 	case VCHIQ_MSG_REMOTE_RELEASE:      return "REMOTE_RELEASE";
222 	case VCHIQ_MSG_REMOTE_USE_ACTIVE:   return "REMOTE_USE_ACTIVE";
223 	}
224 	return "???";
225 }
226 
227 static inline void
228 vchiq_set_service_state(struct vchiq_service *service, int newstate)
229 {
230 	vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
231 		       service->state->id, service->localport,
232 		       srvstate_names[service->srvstate],
233 		       srvstate_names[newstate]);
234 	service->srvstate = newstate;
235 }
236 
237 struct vchiq_service *
238 find_service_by_handle(unsigned int handle)
239 {
240 	struct vchiq_service *service;
241 
242 	rcu_read_lock();
243 	service = handle_to_service(handle);
244 	if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
245 	    service->handle == handle &&
246 	    kref_get_unless_zero(&service->ref_count)) {
247 		service = rcu_pointer_handoff(service);
248 		rcu_read_unlock();
249 		return service;
250 	}
251 	rcu_read_unlock();
252 	vchiq_log_info(vchiq_core_log_level,
253 		       "Invalid service handle 0x%x", handle);
254 	return NULL;
255 }
256 
257 struct vchiq_service *
258 find_service_by_port(struct vchiq_state *state, int localport)
259 {
260 	if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
261 		struct vchiq_service *service;
262 
263 		rcu_read_lock();
264 		service = rcu_dereference(state->services[localport]);
265 		if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
266 		    kref_get_unless_zero(&service->ref_count)) {
267 			service = rcu_pointer_handoff(service);
268 			rcu_read_unlock();
269 			return service;
270 		}
271 		rcu_read_unlock();
272 	}
273 	vchiq_log_info(vchiq_core_log_level,
274 		       "Invalid port %d", localport);
275 	return NULL;
276 }
277 
278 struct vchiq_service *
279 find_service_for_instance(struct vchiq_instance *instance, unsigned int handle)
280 {
281 	struct vchiq_service *service;
282 
283 	rcu_read_lock();
284 	service = handle_to_service(handle);
285 	if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
286 	    service->handle == handle &&
287 	    service->instance == instance &&
288 	    kref_get_unless_zero(&service->ref_count)) {
289 		service = rcu_pointer_handoff(service);
290 		rcu_read_unlock();
291 		return service;
292 	}
293 	rcu_read_unlock();
294 	vchiq_log_info(vchiq_core_log_level,
295 		       "Invalid service handle 0x%x", handle);
296 	return NULL;
297 }
298 
299 struct vchiq_service *
300 find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int handle)
301 {
302 	struct vchiq_service *service;
303 
304 	rcu_read_lock();
305 	service = handle_to_service(handle);
306 	if (service &&
307 	    (service->srvstate == VCHIQ_SRVSTATE_FREE ||
308 	     service->srvstate == VCHIQ_SRVSTATE_CLOSED) &&
309 	    service->handle == handle &&
310 	    service->instance == instance &&
311 	    kref_get_unless_zero(&service->ref_count)) {
312 		service = rcu_pointer_handoff(service);
313 		rcu_read_unlock();
314 		return service;
315 	}
316 	rcu_read_unlock();
317 	vchiq_log_info(vchiq_core_log_level,
318 		       "Invalid service handle 0x%x", handle);
	return NULL;
320 }
321 
322 struct vchiq_service *
323 __next_service_by_instance(struct vchiq_state *state,
324 			   struct vchiq_instance *instance,
325 			   int *pidx)
326 {
327 	struct vchiq_service *service = NULL;
328 	int idx = *pidx;
329 
330 	while (idx < state->unused_service) {
331 		struct vchiq_service *srv;
332 
333 		srv = rcu_dereference(state->services[idx]);
334 		idx++;
335 		if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE &&
336 		    srv->instance == instance) {
337 			service = srv;
338 			break;
339 		}
340 	}
341 
342 	*pidx = idx;
343 	return service;
344 }
345 
346 struct vchiq_service *
347 next_service_by_instance(struct vchiq_state *state,
348 			 struct vchiq_instance *instance,
349 			 int *pidx)
350 {
351 	struct vchiq_service *service;
352 
353 	rcu_read_lock();
354 	while (1) {
355 		service = __next_service_by_instance(state, instance, pidx);
356 		if (!service)
357 			break;
358 		if (kref_get_unless_zero(&service->ref_count)) {
359 			service = rcu_pointer_handoff(service);
360 			break;
361 		}
362 	}
363 	rcu_read_unlock();
364 	return service;
365 }
366 
367 void
368 vchiq_service_get(struct vchiq_service *service)
369 {
370 	if (!service) {
		WARN(1, "%s: service is NULL\n", __func__);
372 		return;
373 	}
374 	kref_get(&service->ref_count);
375 }
376 
377 static void service_release(struct kref *kref)
378 {
379 	struct vchiq_service *service =
380 		container_of(kref, struct vchiq_service, ref_count);
381 	struct vchiq_state *state = service->state;
382 
383 	WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
384 	rcu_assign_pointer(state->services[service->localport], NULL);
385 	if (service->userdata_term)
386 		service->userdata_term(service->base.userdata);
387 	kfree_rcu(service, rcu);
388 }
389 
390 void
391 vchiq_service_put(struct vchiq_service *service)
392 {
393 	if (!service) {
394 		WARN(1, "%s: service is NULL\n", __func__);
395 		return;
396 	}
397 	kref_put(&service->ref_count, service_release);
398 }
399 
400 int
401 vchiq_get_client_id(unsigned int handle)
402 {
403 	struct vchiq_service *service;
404 	int id;
405 
406 	rcu_read_lock();
407 	service = handle_to_service(handle);
408 	id = service ? service->client_id : 0;
409 	rcu_read_unlock();
410 	return id;
411 }
412 
413 void *
414 vchiq_get_service_userdata(unsigned int handle)
415 {
416 	void *userdata;
417 	struct vchiq_service *service;
418 
419 	rcu_read_lock();
420 	service = handle_to_service(handle);
421 	userdata = service ? service->base.userdata : NULL;
422 	rcu_read_unlock();
423 	return userdata;
424 }
425 EXPORT_SYMBOL(vchiq_get_service_userdata);
426 
427 static void
428 mark_service_closing_internal(struct vchiq_service *service, int sh_thread)
429 {
430 	struct vchiq_state *state = service->state;
431 	struct vchiq_service_quota *quota;
432 
433 	service->closing = 1;
434 
435 	/* Synchronise with other threads. */
436 	mutex_lock(&state->recycle_mutex);
437 	mutex_unlock(&state->recycle_mutex);
438 	if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
439 		/*
440 		 * If we're pausing then the slot_mutex is held until resume
441 		 * by the slot handler.  Therefore don't try to acquire this
442 		 * mutex if we're the slot handler and in the pause sent state.
443 		 * We don't need to in this case anyway.
444 		 */
445 		mutex_lock(&state->slot_mutex);
446 		mutex_unlock(&state->slot_mutex);
447 	}
448 
449 	/* Unblock any sending thread. */
450 	quota = &state->service_quotas[service->localport];
451 	complete(&quota->quota_event);
452 }
453 
454 static void
455 mark_service_closing(struct vchiq_service *service)
456 {
457 	mark_service_closing_internal(service, 0);
458 }
459 
460 static inline enum vchiq_status
461 make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
462 		      struct vchiq_header *header, void *bulk_userdata)
463 {
464 	enum vchiq_status status;
465 
466 	vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
467 			service->state->id, service->localport, reason_names[reason],
468 			header, bulk_userdata);
469 	status = service->base.callback(reason, header, service->handle, bulk_userdata);
470 	if (status == VCHIQ_ERROR) {
471 		vchiq_log_warning(vchiq_core_log_level,
472 				  "%d: ignoring ERROR from callback to service %x",
473 				  service->state->id, service->handle);
474 		status = VCHIQ_SUCCESS;
475 	}
476 
477 	if (reason != VCHIQ_MESSAGE_AVAILABLE)
478 		vchiq_release_message(service->handle, header);
479 
480 	return status;
481 }
482 
483 inline void
484 vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
485 {
486 	enum vchiq_connstate oldstate = state->conn_state;
487 
488 	vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id, conn_state_names[oldstate],
489 		       conn_state_names[newstate]);
490 	state->conn_state = newstate;
491 	vchiq_platform_conn_state_changed(state, oldstate, newstate);
492 }
493 
494 static inline void
495 remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
496 {
497 	event->armed = 0;
498 	/*
499 	 * Don't clear the 'fired' flag because it may already have been set
500 	 * by the other side.
501 	 */
502 	init_waitqueue_head(wq);
503 }
504 
/*
 * All the event waiting routines in VCHIQ used a custom semaphore
 * implementation that filtered most signals. This achieved a behaviour
 * similar to the "killable" family of functions. While cleaning up this
 * code, all the routines were switched to the "interruptible" family of
 * functions, as the custom implementation was deemed unjustified and the
 * use of "killable" left all of VCHIQ's threads in D state.
 */
513 static inline int
514 remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
515 {
516 	if (!event->fired) {
517 		event->armed = 1;
518 		dsb(sy);
519 		if (wait_event_interruptible(*wq, event->fired)) {
520 			event->armed = 0;
521 			return 0;
522 		}
523 		event->armed = 0;
524 		wmb();
525 	}
526 
527 	event->fired = 0;
528 	return 1;
529 }
530 
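/*
 * Wake any local waiters on an event without signalling the peer (compare
 * remote_event_signal(), which signals the remote side). Used when the
 * event has already been fired (see remote_event_poll()) and when the
 * local slot handler must be kicked from this side (see request_poll()).
 */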
531 static inline void
532 remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
533 {
534 	event->fired = 1;
535 	event->armed = 0;
536 	wake_up_all(wq);
537 }
538 
539 static inline void
540 remote_event_poll(wait_queue_head_t *wq, struct remote_event *event)
541 {
542 	if (event->fired && event->armed)
543 		remote_event_signal_local(wq, event);
544 }
545 
546 void
547 remote_event_pollall(struct vchiq_state *state)
548 {
549 	remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger);
550 	remote_event_poll(&state->sync_release_event, &state->local->sync_release);
551 	remote_event_poll(&state->trigger_event, &state->local->trigger);
552 	remote_event_poll(&state->recycle_event, &state->local->recycle);
553 }
554 
555 /*
556  * Round up message sizes so that any space at the end of a slot is always big
557  * enough for a header. This relies on header size being a power of two, which
558  * has been verified earlier by a static assertion.
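 *
 * For example, with an 8-byte header a 13-byte payload occupies a stride of
 * 24 bytes: 8 + 13 = 21, rounded up to the next multiple of 8.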
559  */
560 
561 static inline size_t
562 calc_stride(size_t size)
563 {
564 	/* Allow room for the header */
565 	size += sizeof(struct vchiq_header);
566 
567 	/* Round up */
568 	return (size + sizeof(struct vchiq_header) - 1) &
569 		~(sizeof(struct vchiq_header) - 1);
570 }
571 
572 /* Called by the slot handler thread */
573 static struct vchiq_service *
574 get_listening_service(struct vchiq_state *state, int fourcc)
575 {
576 	int i;
577 
578 	WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
579 
580 	rcu_read_lock();
581 	for (i = 0; i < state->unused_service; i++) {
582 		struct vchiq_service *service;
583 
584 		service = rcu_dereference(state->services[i]);
585 		if (service &&
586 		    service->public_fourcc == fourcc &&
587 		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING ||
588 		     (service->srvstate == VCHIQ_SRVSTATE_OPEN &&
589 		      service->remoteport == VCHIQ_PORT_FREE)) &&
590 		    kref_get_unless_zero(&service->ref_count)) {
591 			service = rcu_pointer_handoff(service);
592 			rcu_read_unlock();
593 			return service;
594 		}
595 	}
596 	rcu_read_unlock();
597 	return NULL;
598 }
599 
600 /* Called by the slot handler thread */
601 static struct vchiq_service *
602 get_connected_service(struct vchiq_state *state, unsigned int port)
603 {
604 	int i;
605 
606 	rcu_read_lock();
607 	for (i = 0; i < state->unused_service; i++) {
608 		struct vchiq_service *service =
609 			rcu_dereference(state->services[i]);
610 
611 		if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN &&
612 		    service->remoteport == port &&
613 		    kref_get_unless_zero(&service->ref_count)) {
614 			service = rcu_pointer_handoff(service);
615 			rcu_read_unlock();
616 			return service;
617 		}
618 	}
619 	rcu_read_unlock();
620 	return NULL;
621 }
622 
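/*
 * Record a poll request for a service (or just for the state if service is
 * NULL): set the relevant bit in the service's poll_flags and in the
 * per-group poll_services bitmap, then wake the local slot handler so the
 * request is processed by poll_services().
 */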
623 inline void
624 request_poll(struct vchiq_state *state, struct vchiq_service *service,
625 	     int poll_type)
626 {
627 	u32 value;
628 	int index;
629 
630 	if (!service)
631 		goto skip_service;
632 
633 	do {
634 		value = atomic_read(&service->poll_flags);
635 	} while (atomic_cmpxchg(&service->poll_flags, value,
636 		 value | BIT(poll_type)) != value);
637 
638 	index = BITSET_WORD(service->localport);
639 	do {
640 		value = atomic_read(&state->poll_services[index]);
641 	} while (atomic_cmpxchg(&state->poll_services[index],
642 		 value, value | BIT(service->localport & 0x1f)) != value);
643 
644 skip_service:
645 	state->poll_needed = 1;
646 	wmb();
647 
648 	/* ... and ensure the slot handler runs. */
649 	remote_event_signal_local(&state->trigger_event, &state->local->trigger);
650 }
651 
652 /*
653  * Called from queue_message, by the slot handler and application threads,
654  * with slot_mutex held
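 *
 * Returns a pointer to 'space' bytes at the current transmit position, or
 * NULL if no slot space is available. If the request does not fit in the
 * current slot, the remainder is filled with a PADDING message and the next
 * slot from the local queue is used, waiting for one to be recycled when
 * 'is_blocking' is set.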
655  */
656 static struct vchiq_header *
657 reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
658 {
659 	struct vchiq_shared_state *local = state->local;
660 	int tx_pos = state->local_tx_pos;
661 	int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
662 
663 	if (space > slot_space) {
664 		struct vchiq_header *header;
665 		/* Fill the remaining space with padding */
666 		WARN_ON(!state->tx_data);
667 		header = (struct vchiq_header *)
668 			(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
669 		header->msgid = VCHIQ_MSGID_PADDING;
670 		header->size = slot_space - sizeof(struct vchiq_header);
671 
672 		tx_pos += slot_space;
673 	}
674 
675 	/* If necessary, get the next slot. */
676 	if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
677 		int slot_index;
678 
679 		/* If there is no free slot... */
680 
681 		if (!try_wait_for_completion(&state->slot_available_event)) {
682 			/* ...wait for one. */
683 
684 			VCHIQ_STATS_INC(state, slot_stalls);
685 
686 			/* But first, flush through the last slot. */
687 			state->local_tx_pos = tx_pos;
688 			local->tx_pos = tx_pos;
689 			remote_event_signal(&state->remote->trigger);
690 
691 			if (!is_blocking ||
692 			    (wait_for_completion_interruptible(&state->slot_available_event)))
693 				return NULL; /* No space available */
694 		}
695 
696 		if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
697 			complete(&state->slot_available_event);
698 			pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos);
699 			return NULL;
700 		}
701 
702 		slot_index = local->slot_queue[SLOT_QUEUE_INDEX_FROM_POS_MASKED(tx_pos)];
703 		state->tx_data =
704 			(char *)SLOT_DATA_FROM_INDEX(state, slot_index);
705 	}
706 
707 	state->local_tx_pos = tx_pos + space;
708 
709 	return (struct vchiq_header *)(state->tx_data +
710 						(tx_pos & VCHIQ_SLOT_MASK));
711 }
712 
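/*
 * A DATA message in a recycled slot is being freed: return one message
 * credit to the sending service and, the first time this service is seen
 * for this slot, one slot credit, waking any sender blocked on its quota.
 */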
713 static void
714 process_free_data_message(struct vchiq_state *state, u32 *service_found,
715 			  struct vchiq_header *header)
716 {
717 	int msgid = header->msgid;
718 	int port = VCHIQ_MSG_SRCPORT(msgid);
719 	struct vchiq_service_quota *quota = &state->service_quotas[port];
720 	int count;
721 
722 	spin_lock(&quota_spinlock);
723 	count = quota->message_use_count;
724 	if (count > 0)
725 		quota->message_use_count = count - 1;
726 	spin_unlock(&quota_spinlock);
727 
728 	if (count == quota->message_quota) {
729 		/*
730 		 * Signal the service that it
731 		 * has dropped below its quota
732 		 */
733 		complete(&quota->quota_event);
734 	} else if (count == 0) {
735 		vchiq_log_error(vchiq_core_log_level,
736 				"service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
737 				port, quota->message_use_count, header, msgid, header->msgid,
738 				header->size);
739 		WARN(1, "invalid message use count\n");
740 	}
741 	if (!BITSET_IS_SET(service_found, port)) {
742 		/* Set the found bit for this service */
743 		BITSET_SET(service_found, port);
744 
745 		spin_lock(&quota_spinlock);
746 		count = quota->slot_use_count;
747 		if (count > 0)
748 			quota->slot_use_count = count - 1;
749 		spin_unlock(&quota_spinlock);
750 
751 		if (count > 0) {
752 			/*
753 			 * Signal the service in case
754 			 * it has dropped below its quota
755 			 */
756 			complete(&quota->quota_event);
757 			vchiq_log_trace(vchiq_core_log_level, "%d: pfq:%d %x@%pK - slot_use->%d",
758 					state->id, port, header->size, header, count - 1);
759 		} else {
760 			vchiq_log_error(vchiq_core_log_level,
761 					"service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
762 					port, count, header, msgid, header->msgid, header->size);
763 			WARN(1, "bad slot use count\n");
764 		}
765 	}
766 }
767 
768 /* Called by the recycle thread. */
769 static void
770 process_free_queue(struct vchiq_state *state, u32 *service_found,
771 		   size_t length)
772 {
773 	struct vchiq_shared_state *local = state->local;
774 	int slot_queue_available;
775 
776 	/*
777 	 * Find slots which have been freed by the other side, and return them
778 	 * to the available queue.
779 	 */
780 	slot_queue_available = state->slot_queue_available;
781 
782 	/*
783 	 * Use a memory barrier to ensure that any state that may have been
784 	 * modified by another thread is not masked by stale prefetched
785 	 * values.
786 	 */
787 	mb();
788 
789 	while (slot_queue_available != local->slot_queue_recycle) {
790 		unsigned int pos;
791 		int slot_index = local->slot_queue[slot_queue_available &
792 			VCHIQ_SLOT_QUEUE_MASK];
793 		char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
794 		int data_found = 0;
795 
796 		slot_queue_available++;
797 		/*
798 		 * Beware of the address dependency - data is calculated
799 		 * using an index written by the other side.
800 		 */
801 		rmb();
802 
803 		vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x",
804 				state->id, slot_index, data, local->slot_queue_recycle,
805 				slot_queue_available);
806 
807 		/* Initialise the bitmask for services which have used this slot */
808 		memset(service_found, 0, length);
809 
810 		pos = 0;
811 
812 		while (pos < VCHIQ_SLOT_SIZE) {
813 			struct vchiq_header *header =
814 				(struct vchiq_header *)(data + pos);
815 			int msgid = header->msgid;
816 
817 			if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
818 				process_free_data_message(state, service_found,
819 							  header);
820 				data_found = 1;
821 			}
822 
823 			pos += calc_stride(header->size);
824 			if (pos > VCHIQ_SLOT_SIZE) {
825 				vchiq_log_error(vchiq_core_log_level,
826 						"pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
827 						pos, header, msgid, header->msgid, header->size);
828 				WARN(1, "invalid slot position\n");
829 			}
830 		}
831 
832 		if (data_found) {
833 			int count;
834 
835 			spin_lock(&quota_spinlock);
836 			count = state->data_use_count;
837 			if (count > 0)
838 				state->data_use_count = count - 1;
839 			spin_unlock(&quota_spinlock);
840 			if (count == state->data_quota)
841 				complete(&state->data_quota_event);
842 		}
843 
844 		/*
845 		 * Don't allow the slot to be reused until we are no
846 		 * longer interested in it.
847 		 */
848 		mb();
849 
850 		state->slot_queue_available = slot_queue_available;
851 		complete(&state->slot_available_event);
852 	}
853 }
854 
855 static ssize_t
856 memcpy_copy_callback(void *context, void *dest, size_t offset, size_t maxsize)
857 {
858 	memcpy(dest + offset, context + offset, maxsize);
859 	return maxsize;
860 }
861 
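/*
 * Copy 'size' bytes of message payload into 'dest', invoking the supplied
 * copy callback for each chunk. Returns 'size' on success, the callback's
 * negative error code on failure, or -EIO if the callback returns zero or
 * overruns the remaining space.
 */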
862 static ssize_t
863 copy_message_data(ssize_t (*copy_callback)(void *context, void *dest, size_t offset,
864 					   size_t maxsize),
865 	void *context,
866 	void *dest,
867 	size_t size)
868 {
869 	size_t pos = 0;
870 
871 	while (pos < size) {
872 		ssize_t callback_result;
873 		size_t max_bytes = size - pos;
874 
875 		callback_result =
876 			copy_callback(context, dest + pos,
877 				      pos, max_bytes);
878 
879 		if (callback_result < 0)
880 			return callback_result;
881 
882 		if (!callback_result)
883 			return -EIO;
884 
885 		if (callback_result > max_bytes)
886 			return -EIO;
887 
888 		pos += callback_result;
889 	}
890 
891 	return size;
892 }
893 
894 /* Called by the slot handler and application threads */
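/*
 * DATA messages are subject to flow control: a service may not exceed its
 * message_quota of outstanding messages or its slot_quota of transmit
 * slots, and the state-wide data_quota bounds the slots used by all
 * services. When a quota is exhausted the sender blocks (interruptibly) on
 * the relevant completion until the recycle thread returns credits in
 * process_free_queue().
 */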
895 static enum vchiq_status
896 queue_message(struct vchiq_state *state, struct vchiq_service *service,
897 	      int msgid,
898 	      ssize_t (*copy_callback)(void *context, void *dest,
899 				       size_t offset, size_t maxsize),
900 	      void *context, size_t size, int flags)
901 {
902 	struct vchiq_shared_state *local;
903 	struct vchiq_service_quota *quota = NULL;
904 	struct vchiq_header *header;
905 	int type = VCHIQ_MSG_TYPE(msgid);
906 
907 	size_t stride;
908 
909 	local = state->local;
910 
911 	stride = calc_stride(size);
912 
913 	WARN_ON(stride > VCHIQ_SLOT_SIZE);
914 
915 	if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
916 	    mutex_lock_killable(&state->slot_mutex))
917 		return VCHIQ_RETRY;
918 
919 	if (type == VCHIQ_MSG_DATA) {
920 		int tx_end_index;
921 
922 		if (!service) {
923 			WARN(1, "%s: service is NULL\n", __func__);
924 			mutex_unlock(&state->slot_mutex);
925 			return VCHIQ_ERROR;
926 		}
927 
928 		WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
929 				 QMFLAGS_NO_MUTEX_UNLOCK));
930 
931 		if (service->closing) {
932 			/* The service has been closed */
933 			mutex_unlock(&state->slot_mutex);
934 			return VCHIQ_ERROR;
935 		}
936 
937 		quota = &state->service_quotas[service->localport];
938 
939 		spin_lock(&quota_spinlock);
940 
941 		/*
942 		 * Ensure this service doesn't use more than its quota of
943 		 * messages or slots
944 		 */
945 		tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
946 
947 		/*
948 		 * Ensure data messages don't use more than their quota of
949 		 * slots
950 		 */
951 		while ((tx_end_index != state->previous_data_index) &&
952 		       (state->data_use_count == state->data_quota)) {
953 			VCHIQ_STATS_INC(state, data_stalls);
954 			spin_unlock(&quota_spinlock);
955 			mutex_unlock(&state->slot_mutex);
956 
957 			if (wait_for_completion_interruptible(&state->data_quota_event))
958 				return VCHIQ_RETRY;
959 
960 			mutex_lock(&state->slot_mutex);
961 			spin_lock(&quota_spinlock);
962 			tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
963 			if ((tx_end_index == state->previous_data_index) ||
964 			    (state->data_use_count < state->data_quota)) {
965 				/* Pass the signal on to other waiters */
966 				complete(&state->data_quota_event);
967 				break;
968 			}
969 		}
970 
971 		while ((quota->message_use_count == quota->message_quota) ||
972 		       ((tx_end_index != quota->previous_tx_index) &&
973 			(quota->slot_use_count == quota->slot_quota))) {
974 			spin_unlock(&quota_spinlock);
975 			vchiq_log_trace(vchiq_core_log_level,
976 					"%d: qm:%d %s,%zx - quota stall (msg %d, slot %d)",
977 					state->id, service->localport, msg_type_str(type), size,
978 					quota->message_use_count, quota->slot_use_count);
979 			VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
980 			mutex_unlock(&state->slot_mutex);
981 			if (wait_for_completion_interruptible(&quota->quota_event))
982 				return VCHIQ_RETRY;
983 			if (service->closing)
984 				return VCHIQ_ERROR;
985 			if (mutex_lock_killable(&state->slot_mutex))
986 				return VCHIQ_RETRY;
987 			if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
988 				/* The service has been closed */
989 				mutex_unlock(&state->slot_mutex);
990 				return VCHIQ_ERROR;
991 			}
992 			spin_lock(&quota_spinlock);
993 			tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
994 		}
995 
996 		spin_unlock(&quota_spinlock);
997 	}
998 
999 	header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING);
1000 
1001 	if (!header) {
1002 		if (service)
1003 			VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
1004 		/*
1005 		 * In the event of a failure, return the mutex to the
1006 		 * state it was in
1007 		 */
1008 		if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
1009 			mutex_unlock(&state->slot_mutex);
1010 		return VCHIQ_RETRY;
1011 	}
1012 
1013 	if (type == VCHIQ_MSG_DATA) {
1014 		ssize_t callback_result;
1015 		int tx_end_index;
1016 		int slot_use_count;
1017 
1018 		vchiq_log_info(vchiq_core_log_level, "%d: qm %s@%pK,%zx (%d->%d)", state->id,
1019 			       msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
1020 			       VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
1021 
1022 		WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
1023 				 QMFLAGS_NO_MUTEX_UNLOCK));
1024 
1025 		callback_result =
1026 			copy_message_data(copy_callback, context,
1027 					  header->data, size);
1028 
		if (callback_result < 0) {
			mutex_unlock(&state->slot_mutex);
			VCHIQ_SERVICE_STATS_INC(service, error_count);
			return VCHIQ_ERROR;
		}

		if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO))
			vchiq_log_dump_mem("Sent", 0, header->data,
					   min((size_t)16, (size_t)callback_result));
1042 
1043 		spin_lock(&quota_spinlock);
1044 		quota->message_use_count++;
1045 
1046 		tx_end_index =
1047 			SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
1048 
1049 		/*
1050 		 * If this transmission can't fit in the last slot used by any
1051 		 * service, the data_use_count must be increased.
1052 		 */
1053 		if (tx_end_index != state->previous_data_index) {
1054 			state->previous_data_index = tx_end_index;
1055 			state->data_use_count++;
1056 		}
1057 
1058 		/*
1059 		 * If this isn't the same slot last used by this service,
1060 		 * the service's slot_use_count must be increased.
1061 		 */
1062 		if (tx_end_index != quota->previous_tx_index) {
1063 			quota->previous_tx_index = tx_end_index;
1064 			slot_use_count = ++quota->slot_use_count;
1065 		} else {
1066 			slot_use_count = 0;
1067 		}
1068 
1069 		spin_unlock(&quota_spinlock);
1070 
1071 		if (slot_use_count)
1072 			vchiq_log_trace(vchiq_core_log_level,
1073 					"%d: qm:%d %s,%zx - slot_use->%d (hdr %p)", state->id,
1074 					service->localport, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1075 					size, slot_use_count, header);
1076 
1077 		VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1078 		VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1079 	} else {
1080 		vchiq_log_info(vchiq_core_log_level, "%d: qm %s@%pK,%zx (%d->%d)", state->id,
1081 			       msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
1082 			       VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
1083 		if (size != 0) {
1084 			/*
1085 			 * It is assumed for now that this code path
1086 			 * only happens from calls inside this file.
1087 			 *
			 * External callers are through the vchiq_queue_message
			 * path, which always sets the type to be VCHIQ_MSG_DATA.
1090 			 *
1091 			 * At first glance this appears to be correct but
1092 			 * more review is needed.
1093 			 */
1094 			copy_message_data(copy_callback, context,
1095 					  header->data, size);
1096 		}
1097 		VCHIQ_STATS_INC(state, ctrl_tx_count);
1098 	}
1099 
1100 	header->msgid = msgid;
1101 	header->size = size;
1102 
1103 	{
1104 		int svc_fourcc;
1105 
1106 		svc_fourcc = service
1107 			? service->base.fourcc
1108 			: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1109 
1110 		vchiq_log_info(SRVTRACE_LEVEL(service),
1111 			       "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
1112 			       msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
1113 			       VCHIQ_FOURCC_AS_4CHARS(svc_fourcc), VCHIQ_MSG_SRCPORT(msgid),
1114 			       VCHIQ_MSG_DSTPORT(msgid), size);
1115 	}
1116 
1117 	/* Make sure the new header is visible to the peer. */
1118 	wmb();
1119 
1120 	/* Make the new tx_pos visible to the peer. */
1121 	local->tx_pos = state->local_tx_pos;
1122 	wmb();
1123 
1124 	if (service && (type == VCHIQ_MSG_CLOSE))
1125 		vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
1126 
1127 	if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK))
1128 		mutex_unlock(&state->slot_mutex);
1129 
1130 	remote_event_signal(&state->remote->trigger);
1131 
1132 	return VCHIQ_SUCCESS;
1133 }
1134 
1135 /* Called by the slot handler and application threads */
1136 static enum vchiq_status
1137 queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
1138 		   int msgid,
1139 		   ssize_t (*copy_callback)(void *context, void *dest,
1140 					    size_t offset, size_t maxsize),
1141 		   void *context, int size, int is_blocking)
1142 {
1143 	struct vchiq_shared_state *local;
1144 	struct vchiq_header *header;
1145 	ssize_t callback_result;
1146 
1147 	local = state->local;
1148 
1149 	if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME &&
1150 	    mutex_lock_killable(&state->sync_mutex))
1151 		return VCHIQ_RETRY;
1152 
1153 	remote_event_wait(&state->sync_release_event, &local->sync_release);
1154 
1155 	rmb();
1156 
1157 	header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
1158 		local->slot_sync);
1159 
1160 	{
1161 		int oldmsgid = header->msgid;
1162 
1163 		if (oldmsgid != VCHIQ_MSGID_PADDING)
1164 			vchiq_log_error(vchiq_core_log_level, "%d: qms - msgid %x, not PADDING",
1165 					state->id, oldmsgid);
1166 	}
1167 
1168 	vchiq_log_info(vchiq_sync_log_level,
1169 		       "%d: qms %s@%pK,%x (%d->%d)", state->id,
1170 		       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1171 		       header, size, VCHIQ_MSG_SRCPORT(msgid),
1172 		       VCHIQ_MSG_DSTPORT(msgid));
1173 
1174 	callback_result =
1175 		copy_message_data(copy_callback, context,
1176 				  header->data, size);
1177 
	if (callback_result < 0) {
		mutex_unlock(&state->sync_mutex);
		if (service)
			VCHIQ_SERVICE_STATS_INC(service, error_count);
		return VCHIQ_ERROR;
	}
1184 
1185 	if (service) {
		if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO))
			vchiq_log_dump_mem("Sent", 0, header->data,
					   min((size_t)16, (size_t)callback_result));
1192 
1193 		VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1194 		VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1195 	} else {
1196 		VCHIQ_STATS_INC(state, ctrl_tx_count);
1197 	}
1198 
1199 	header->size = size;
1200 	header->msgid = msgid;
1201 
1202 	if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
1203 		int svc_fourcc;
1204 
1205 		svc_fourcc = service
1206 			? service->base.fourcc
1207 			: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1208 
1209 		vchiq_log_trace(vchiq_sync_log_level,
1210 				"Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
1211 				msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
1212 				VCHIQ_FOURCC_AS_4CHARS(svc_fourcc), VCHIQ_MSG_SRCPORT(msgid),
1213 				VCHIQ_MSG_DSTPORT(msgid), size);
1214 	}
1215 
1216 	remote_event_signal(&state->remote->sync_trigger);
1217 
1218 	if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
1219 		mutex_unlock(&state->sync_mutex);
1220 
1221 	return VCHIQ_SUCCESS;
1222 }
1223 
1224 static inline void
1225 claim_slot(struct vchiq_slot_info *slot)
1226 {
1227 	slot->use_count++;
1228 }
1229 
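/*
 * Release a claim on a received slot (clearing the CLAIMED flag on the
 * message header to guard against a double release). When the release
 * count catches up with the use count, the slot index is appended to the
 * remote recycle queue and the remote recycle event is signalled.
 */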
1230 static void
1231 release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
1232 	     struct vchiq_header *header, struct vchiq_service *service)
1233 {
1234 	mutex_lock(&state->recycle_mutex);
1235 
1236 	if (header) {
1237 		int msgid = header->msgid;
1238 
1239 		if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) || (service && service->closing)) {
1240 			mutex_unlock(&state->recycle_mutex);
1241 			return;
1242 		}
1243 
1244 		/* Rewrite the message header to prevent a double release */
1245 		header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
1246 	}
1247 
1248 	slot_info->release_count++;
1249 
1250 	if (slot_info->release_count == slot_info->use_count) {
1251 		int slot_queue_recycle;
1252 		/* Add to the freed queue */
1253 
1254 		/*
1255 		 * A read barrier is necessary here to prevent speculative
1256 		 * fetches of remote->slot_queue_recycle from overtaking the
1257 		 * mutex.
1258 		 */
1259 		rmb();
1260 
1261 		slot_queue_recycle = state->remote->slot_queue_recycle;
1262 		state->remote->slot_queue[slot_queue_recycle &
1263 			VCHIQ_SLOT_QUEUE_MASK] =
1264 			SLOT_INDEX_FROM_INFO(state, slot_info);
1265 		state->remote->slot_queue_recycle = slot_queue_recycle + 1;
1266 		vchiq_log_info(vchiq_core_log_level, "%d: %s %d - recycle->%x", state->id, __func__,
1267 			       SLOT_INDEX_FROM_INFO(state, slot_info),
1268 			       state->remote->slot_queue_recycle);
1269 
1270 		/*
1271 		 * A write barrier is necessary, but remote_event_signal
1272 		 * contains one.
1273 		 */
1274 		remote_event_signal(&state->remote->recycle);
1275 	}
1276 
1277 	mutex_unlock(&state->recycle_mutex);
1278 }
1279 
1280 static inline enum vchiq_reason
1281 get_bulk_reason(struct vchiq_bulk *bulk)
1282 {
1283 	if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1284 		if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
1285 			return VCHIQ_BULK_TRANSMIT_ABORTED;
1286 
1287 		return VCHIQ_BULK_TRANSMIT_DONE;
1288 	}
1289 
1290 	if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
1291 		return VCHIQ_BULK_RECEIVE_ABORTED;
1292 
1293 	return VCHIQ_BULK_RECEIVE_DONE;
1294 }
1295 
1296 /* Called by the slot handler - don't hold the bulk mutex */
1297 static enum vchiq_status
1298 notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
1299 	     int retry_poll)
1300 {
1301 	enum vchiq_status status = VCHIQ_SUCCESS;
1302 
1303 	vchiq_log_trace(vchiq_core_log_level, "%d: nb:%d %cx - p=%x rn=%x r=%x", service->state->id,
1304 			service->localport, (queue == &service->bulk_tx) ? 't' : 'r',
1305 			queue->process, queue->remote_notify, queue->remove);
1306 
1307 	queue->remote_notify = queue->process;
1308 
1309 	while (queue->remove != queue->remote_notify) {
1310 		struct vchiq_bulk *bulk =
1311 			&queue->bulks[BULK_INDEX(queue->remove)];
1312 
1313 		/*
1314 		 * Only generate callbacks for non-dummy bulk
1315 		 * requests, and non-terminated services
1316 		 */
1317 		if (bulk->data && service->instance) {
1318 			if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
1319 				if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1320 					VCHIQ_SERVICE_STATS_INC(service, bulk_tx_count);
1321 					VCHIQ_SERVICE_STATS_ADD(service, bulk_tx_bytes,
1322 								bulk->actual);
1323 				} else {
1324 					VCHIQ_SERVICE_STATS_INC(service, bulk_rx_count);
1325 					VCHIQ_SERVICE_STATS_ADD(service, bulk_rx_bytes,
1326 								bulk->actual);
1327 				}
1328 			} else {
1329 				VCHIQ_SERVICE_STATS_INC(service, bulk_aborted_count);
1330 			}
1331 			if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
1332 				struct bulk_waiter *waiter;
1333 
1334 				spin_lock(&bulk_waiter_spinlock);
1335 				waiter = bulk->userdata;
1336 				if (waiter) {
1337 					waiter->actual = bulk->actual;
1338 					complete(&waiter->event);
1339 				}
1340 				spin_unlock(&bulk_waiter_spinlock);
1341 			} else if (bulk->mode == VCHIQ_BULK_MODE_CALLBACK) {
1342 				enum vchiq_reason reason =
1343 						get_bulk_reason(bulk);
				status = make_service_callback(service, reason, NULL,
							       bulk->userdata);
1346 				if (status == VCHIQ_RETRY)
1347 					break;
1348 			}
1349 		}
1350 
1351 		queue->remove++;
1352 		complete(&service->bulk_remove_event);
1353 	}
1354 	if (!retry_poll)
1355 		status = VCHIQ_SUCCESS;
1356 
1357 	if (status == VCHIQ_RETRY)
1358 		request_poll(service->state, service, (queue == &service->bulk_tx) ?
1359 			     VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
1360 
1361 	return status;
1362 }
1363 
1364 static void
1365 poll_services_of_group(struct vchiq_state *state, int group)
1366 {
1367 	u32 flags = atomic_xchg(&state->poll_services[group], 0);
1368 	int i;
1369 
1370 	for (i = 0; flags; i++) {
1371 		struct vchiq_service *service;
1372 		u32 service_flags;
1373 
1374 		if ((flags & BIT(i)) == 0)
1375 			continue;
1376 
1377 		service = find_service_by_port(state, (group << 5) + i);
1378 		flags &= ~BIT(i);
1379 
1380 		if (!service)
1381 			continue;
1382 
1383 		service_flags = atomic_xchg(&service->poll_flags, 0);
1384 		if (service_flags & BIT(VCHIQ_POLL_REMOVE)) {
1385 			vchiq_log_info(vchiq_core_log_level, "%d: ps - remove %d<->%d",
1386 				       state->id, service->localport,
1387 				       service->remoteport);
1388 
1389 			/*
1390 			 * Make it look like a client, because
1391 			 * it must be removed and not left in
1392 			 * the LISTENING state.
1393 			 */
1394 			service->public_fourcc = VCHIQ_FOURCC_INVALID;
1395 
1396 			if (vchiq_close_service_internal(service, NO_CLOSE_RECVD) !=
1397 							 VCHIQ_SUCCESS)
1398 				request_poll(state, service, VCHIQ_POLL_REMOVE);
1399 		} else if (service_flags & BIT(VCHIQ_POLL_TERMINATE)) {
1400 			vchiq_log_info(vchiq_core_log_level, "%d: ps - terminate %d<->%d",
1401 				       state->id, service->localport, service->remoteport);
1402 			if (vchiq_close_service_internal(service, NO_CLOSE_RECVD) != VCHIQ_SUCCESS)
1403 				request_poll(state, service, VCHIQ_POLL_TERMINATE);
1404 		}
1405 		if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY))
1406 			notify_bulks(service, &service->bulk_tx, RETRY_POLL);
1407 		if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY))
1408 			notify_bulks(service, &service->bulk_rx, RETRY_POLL);
1409 		vchiq_service_put(service);
1410 	}
1411 }
1412 
1413 /* Called by the slot handler thread */
1414 static void
1415 poll_services(struct vchiq_state *state)
1416 {
1417 	int group;
1418 
1419 	for (group = 0; group < BITSET_SIZE(state->unused_service); group++)
1420 		poll_services_of_group(state, group);
1421 }
1422 
1423 /* Called with the bulk_mutex held */
1424 static void
1425 abort_outstanding_bulks(struct vchiq_service *service,
1426 			struct vchiq_bulk_queue *queue)
1427 {
1428 	int is_tx = (queue == &service->bulk_tx);
1429 
1430 	vchiq_log_trace(vchiq_core_log_level, "%d: aob:%d %cx - li=%x ri=%x p=%x",
1431 			service->state->id, service->localport, is_tx ? 't' : 'r',
1432 			queue->local_insert, queue->remote_insert, queue->process);
1433 
1434 	WARN_ON((int)(queue->local_insert - queue->process) < 0);
1435 	WARN_ON((int)(queue->remote_insert - queue->process) < 0);
1436 
1437 	while ((queue->process != queue->local_insert) ||
1438 	       (queue->process != queue->remote_insert)) {
1439 		struct vchiq_bulk *bulk = &queue->bulks[BULK_INDEX(queue->process)];
1440 
1441 		if (queue->process == queue->remote_insert) {
1442 			/* fabricate a matching dummy bulk */
1443 			bulk->remote_data = NULL;
1444 			bulk->remote_size = 0;
1445 			queue->remote_insert++;
1446 		}
1447 
1448 		if (queue->process != queue->local_insert) {
1449 			vchiq_complete_bulk(bulk);
1450 
1451 			vchiq_log_info(SRVTRACE_LEVEL(service),
1452 				       "%s %c%c%c%c d:%d ABORTED - tx len:%d, rx len:%d",
1453 				       is_tx ? "Send Bulk to" : "Recv Bulk from",
1454 				       VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1455 				       service->remoteport, bulk->size, bulk->remote_size);
1456 		} else {
1457 			/* fabricate a matching dummy bulk */
			bulk->data = NULL;
1459 			bulk->size = 0;
1460 			bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
1461 			bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
1462 				VCHIQ_BULK_RECEIVE;
1463 			queue->local_insert++;
1464 		}
1465 
1466 		queue->process++;
1467 	}
1468 }
1469 
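/*
 * Handle an incoming OPEN request: look up a listening service for the
 * requested fourcc, check that the version ranges overlap, and reply with
 * an OPENACK (synchronous or ordinary) carrying the local version; any
 * failure is answered with a CLOSE. Returns 1 once the message has been
 * dealt with, or 0 if the reply could not be queued and a retry is needed.
 */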
1470 static int
1471 parse_open(struct vchiq_state *state, struct vchiq_header *header)
1472 {
1473 	const struct vchiq_open_payload *payload;
1474 	struct vchiq_service *service = NULL;
1475 	int msgid, size;
1476 	unsigned int localport, remoteport, fourcc;
1477 	short version, version_min;
1478 
1479 	msgid = header->msgid;
1480 	size = header->size;
1481 	localport = VCHIQ_MSG_DSTPORT(msgid);
1482 	remoteport = VCHIQ_MSG_SRCPORT(msgid);
1483 	if (size < sizeof(struct vchiq_open_payload))
1484 		goto fail_open;
1485 
1486 	payload = (struct vchiq_open_payload *)header->data;
1487 	fourcc = payload->fourcc;
1488 	vchiq_log_info(vchiq_core_log_level, "%d: prs OPEN@%pK (%d->'%c%c%c%c')",
1489 		       state->id, header, localport, VCHIQ_FOURCC_AS_4CHARS(fourcc));
1490 
1491 	service = get_listening_service(state, fourcc);
1492 	if (!service)
1493 		goto fail_open;
1494 
1495 	/* A matching service exists */
1496 	version = payload->version;
1497 	version_min = payload->version_min;
1498 
1499 	if ((service->version < version_min) || (version < service->version_min)) {
1500 		/* Version mismatch */
1501 		vchiq_loud_error_header();
1502 		vchiq_loud_error("%d: service %d (%c%c%c%c) version mismatch - local (%d, min %d) vs. remote (%d, min %d)",
1503 				 state->id, service->localport, VCHIQ_FOURCC_AS_4CHARS(fourcc),
1504 				 service->version, service->version_min, version, version_min);
1505 		vchiq_loud_error_footer();
1506 		vchiq_service_put(service);
1507 		service = NULL;
1508 		goto fail_open;
1509 	}
1510 	service->peer_version = version;
1511 
1512 	if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
1513 		struct vchiq_openack_payload ack_payload = {
1514 			service->version
1515 		};
1516 		int openack_id = MAKE_OPENACK(service->localport, remoteport);
1517 
1518 		if (state->version_common <
1519 		    VCHIQ_VERSION_SYNCHRONOUS_MODE)
1520 			service->sync = 0;
1521 
1522 		/* Acknowledge the OPEN */
1523 		if (service->sync) {
1524 			if (queue_message_sync(state, NULL, openack_id, memcpy_copy_callback,
1525 					       &ack_payload, sizeof(ack_payload), 0) == VCHIQ_RETRY)
1526 				goto bail_not_ready;
1527 		} else {
1528 			if (queue_message(state, NULL, openack_id, memcpy_copy_callback,
1529 					  &ack_payload, sizeof(ack_payload), 0) == VCHIQ_RETRY)
1530 				goto bail_not_ready;
1531 		}
1532 
1533 		/* The service is now open */
1534 		vchiq_set_service_state(service, service->sync ? VCHIQ_SRVSTATE_OPENSYNC
1535 					: VCHIQ_SRVSTATE_OPEN);
1536 	}
1537 
1538 	/* Success - the message has been dealt with */
1539 	vchiq_service_put(service);
1540 	return 1;
1541 
1542 fail_open:
1543 	/* No available service, or an invalid request - send a CLOSE */
1544 	if (queue_message(state, NULL, MAKE_CLOSE(0, VCHIQ_MSG_SRCPORT(msgid)),
1545 			  NULL, NULL, 0, 0) == VCHIQ_RETRY)
1546 		goto bail_not_ready;
1547 
1548 	return 1;
1549 
1550 bail_not_ready:
1551 	if (service)
1552 		vchiq_service_put(service);
1553 
1554 	return 0;
1555 }
1556 
1557 /**
1558  * parse_message() - parses a single message from the rx slot
1559  * @state:  vchiq state struct
1560  * @header: message header
1561  *
1562  * Context: Process context
1563  *
1564  * Return:
1565  * * >= 0     - size of the parsed message payload (without header)
 * * -EINVAL  - fatal error occurred, caller must bail out
1567  */
1568 static int
1569 parse_message(struct vchiq_state *state, struct vchiq_header *header)
1570 {
1571 	struct vchiq_service *service = NULL;
1572 	unsigned int localport, remoteport;
1573 	int msgid, size, type, ret = -EINVAL;
1574 
1575 	DEBUG_INITIALISE(state->local);
1576 
1577 	DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
1578 	msgid = header->msgid;
1579 	DEBUG_VALUE(PARSE_MSGID, msgid);
1580 	size = header->size;
1581 	type = VCHIQ_MSG_TYPE(msgid);
1582 	localport = VCHIQ_MSG_DSTPORT(msgid);
1583 	remoteport = VCHIQ_MSG_SRCPORT(msgid);
1584 
1585 	if (type != VCHIQ_MSG_DATA)
1586 		VCHIQ_STATS_INC(state, ctrl_rx_count);
1587 
1588 	switch (type) {
1589 	case VCHIQ_MSG_OPENACK:
1590 	case VCHIQ_MSG_CLOSE:
1591 	case VCHIQ_MSG_DATA:
1592 	case VCHIQ_MSG_BULK_RX:
1593 	case VCHIQ_MSG_BULK_TX:
1594 	case VCHIQ_MSG_BULK_RX_DONE:
1595 	case VCHIQ_MSG_BULK_TX_DONE:
1596 		service = find_service_by_port(state, localport);
1597 		if ((!service ||
1598 		     ((service->remoteport != remoteport) &&
1599 		      (service->remoteport != VCHIQ_PORT_FREE))) &&
1600 		    (localport == 0) &&
1601 		    (type == VCHIQ_MSG_CLOSE)) {
1602 			/*
1603 			 * This could be a CLOSE from a client which
1604 			 * hadn't yet received the OPENACK - look for
1605 			 * the connected service
1606 			 */
1607 			if (service)
1608 				vchiq_service_put(service);
1609 			service = get_connected_service(state, remoteport);
1610 			if (service)
1611 				vchiq_log_warning(vchiq_core_log_level,
1612 						  "%d: prs %s@%pK (%d->%d) - found connected service %d",
1613 						  state->id, msg_type_str(type), header,
1614 						  remoteport, localport, service->localport);
1615 		}
1616 
1617 		if (!service) {
1618 			vchiq_log_error(vchiq_core_log_level,
1619 					"%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
1620 					state->id, msg_type_str(type), header, remoteport,
1621 					localport, localport);
1622 			goto skip_message;
1623 		}
1624 		break;
1625 	default:
1626 		break;
1627 	}
1628 
1629 	if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
1630 		int svc_fourcc;
1631 
1632 		svc_fourcc = service
1633 			? service->base.fourcc
1634 			: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1635 		vchiq_log_info(SRVTRACE_LEVEL(service),
1636 			       "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d len:%d",
1637 			       msg_type_str(type), type, VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1638 			       remoteport, localport, size);
1639 		if (size > 0)
1640 			vchiq_log_dump_mem("Rcvd", 0, header->data, min(16, size));
1641 	}
1642 
1643 	if (((unsigned long)header & VCHIQ_SLOT_MASK) +
1644 	    calc_stride(size) > VCHIQ_SLOT_SIZE) {
1645 		vchiq_log_error(vchiq_core_log_level,
1646 				"header %pK (msgid %x) - size %x too big for slot",
1647 				header, (unsigned int)msgid, (unsigned int)size);
1648 		WARN(1, "oversized for slot\n");
1649 	}
1650 
1651 	switch (type) {
1652 	case VCHIQ_MSG_OPEN:
1653 		WARN_ON(VCHIQ_MSG_DSTPORT(msgid));
1654 		if (!parse_open(state, header))
1655 			goto bail_not_ready;
1656 		break;
1657 	case VCHIQ_MSG_OPENACK:
1658 		if (size >= sizeof(struct vchiq_openack_payload)) {
1659 			const struct vchiq_openack_payload *payload =
1660 				(struct vchiq_openack_payload *)
1661 				header->data;
1662 			service->peer_version = payload->version;
1663 		}
1664 		vchiq_log_info(vchiq_core_log_level, "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
1665 			       state->id, header, size, remoteport, localport,
1666 			       service->peer_version);
1667 		if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
1668 			service->remoteport = remoteport;
1669 			vchiq_set_service_state(service, VCHIQ_SRVSTATE_OPEN);
1670 			complete(&service->remove_event);
1671 		} else {
1672 			vchiq_log_error(vchiq_core_log_level, "OPENACK received in state %s",
1673 					srvstate_names[service->srvstate]);
1674 		}
1675 		break;
1676 	case VCHIQ_MSG_CLOSE:
1677 		WARN_ON(size); /* There should be no data */
1678 
1679 		vchiq_log_info(vchiq_core_log_level, "%d: prs CLOSE@%pK (%d->%d)",
1680 			       state->id, header, remoteport, localport);
1681 
1682 		mark_service_closing_internal(service, 1);
1683 
1684 		if (vchiq_close_service_internal(service, CLOSE_RECVD) == VCHIQ_RETRY)
1685 			goto bail_not_ready;
1686 
1687 		vchiq_log_info(vchiq_core_log_level, "Close Service %c%c%c%c s:%u d:%d",
1688 			       VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1689 			       service->localport, service->remoteport);
1690 		break;
1691 	case VCHIQ_MSG_DATA:
1692 		vchiq_log_info(vchiq_core_log_level, "%d: prs DATA@%pK,%x (%d->%d)",
1693 			       state->id, header, size, remoteport, localport);
1694 
1695 		if ((service->remoteport == remoteport) &&
1696 		    (service->srvstate == VCHIQ_SRVSTATE_OPEN)) {
1697 			header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
1698 			claim_slot(state->rx_info);
1699 			DEBUG_TRACE(PARSE_LINE);
1700 			if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
1701 						  NULL) == VCHIQ_RETRY) {
1702 				DEBUG_TRACE(PARSE_LINE);
1703 				goto bail_not_ready;
1704 			}
1705 			VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
1706 			VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes, size);
1707 		} else {
1708 			VCHIQ_STATS_INC(state, error_count);
1709 		}
1710 		break;
1711 	case VCHIQ_MSG_CONNECT:
1712 		vchiq_log_info(vchiq_core_log_level, "%d: prs CONNECT@%pK", state->id, header);
		state->version_common = ((struct vchiq_slot_zero *)
					 state->slot_data)->version;
1715 		complete(&state->connect);
1716 		break;
1717 	case VCHIQ_MSG_BULK_RX:
1718 	case VCHIQ_MSG_BULK_TX:
1719 		/*
1720 		 * We should never receive a bulk request from the
1721 		 * other side since we're not setup to perform as the
1722 		 * master.
1723 		 */
1724 		WARN_ON(1);
1725 		break;
1726 	case VCHIQ_MSG_BULK_RX_DONE:
1727 	case VCHIQ_MSG_BULK_TX_DONE:
1728 		if ((service->remoteport == remoteport) &&
1729 		    (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
1730 			struct vchiq_bulk_queue *queue;
1731 			struct vchiq_bulk *bulk;
1732 
1733 			queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
1734 				&service->bulk_rx : &service->bulk_tx;
1735 
1736 			DEBUG_TRACE(PARSE_LINE);
1737 			if (mutex_lock_killable(&service->bulk_mutex)) {
1738 				DEBUG_TRACE(PARSE_LINE);
1739 				goto bail_not_ready;
1740 			}
1741 			if ((int)(queue->remote_insert -
1742 				queue->local_insert) >= 0) {
1743 				vchiq_log_error(vchiq_core_log_level,
1744 						"%d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)",
1745 						state->id, msg_type_str(type), header, remoteport,
1746 						localport, queue->remote_insert,
1747 						queue->local_insert);
1748 				mutex_unlock(&service->bulk_mutex);
1749 				break;
1750 			}
1751 			if (queue->process != queue->remote_insert) {
1752 				pr_err("%s: p %x != ri %x\n",
1753 				       __func__,
1754 				       queue->process,
1755 				       queue->remote_insert);
1756 				mutex_unlock(&service->bulk_mutex);
1757 				goto bail_not_ready;
1758 			}
1759 
1760 			bulk = &queue->bulks[BULK_INDEX(queue->remote_insert)];
1761 			bulk->actual = *(int *)header->data;
1762 			queue->remote_insert++;
1763 
1764 			vchiq_log_info(vchiq_core_log_level, "%d: prs %s@%pK (%d->%d) %x@%pad",
1765 				       state->id, msg_type_str(type), header, remoteport, localport,
1766 				       bulk->actual, &bulk->data);
1767 
1768 			vchiq_log_trace(vchiq_core_log_level, "%d: prs:%d %cx li=%x ri=%x p=%x",
1769 					state->id, localport,
1770 					(type == VCHIQ_MSG_BULK_RX_DONE) ? 'r' : 't',
1771 					queue->local_insert, queue->remote_insert, queue->process);
1772 
1773 			DEBUG_TRACE(PARSE_LINE);
1774 			WARN_ON(queue->process == queue->local_insert);
1775 			vchiq_complete_bulk(bulk);
1776 			queue->process++;
1777 			mutex_unlock(&service->bulk_mutex);
1778 			DEBUG_TRACE(PARSE_LINE);
1779 			notify_bulks(service, queue, RETRY_POLL);
1780 			DEBUG_TRACE(PARSE_LINE);
1781 		}
1782 		break;
1783 	case VCHIQ_MSG_PADDING:
1784 		vchiq_log_trace(vchiq_core_log_level, "%d: prs PADDING@%pK,%x",
1785 				state->id, header, size);
1786 		break;
1787 	case VCHIQ_MSG_PAUSE:
1788 		/* If initiated, signal the application thread */
1789 		vchiq_log_trace(vchiq_core_log_level, "%d: prs PAUSE@%pK,%x",
1790 				state->id, header, size);
1791 		if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
1792 			vchiq_log_error(vchiq_core_log_level, "%d: PAUSE received in state PAUSED",
1793 					state->id);
1794 			break;
1795 		}
1796 		if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
1797 			/* Send a PAUSE in response */
1798 			if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0,
1799 					  QMFLAGS_NO_MUTEX_UNLOCK) == VCHIQ_RETRY)
1800 				goto bail_not_ready;
1801 		}
1802 		/* At this point slot_mutex is held */
1803 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
1804 		break;
1805 	case VCHIQ_MSG_RESUME:
1806 		vchiq_log_trace(vchiq_core_log_level, "%d: prs RESUME@%pK,%x",
1807 				state->id, header, size);
1808 		/* Release the slot mutex */
1809 		mutex_unlock(&state->slot_mutex);
1810 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1811 		break;
1812 
1813 	case VCHIQ_MSG_REMOTE_USE:
1814 		vchiq_on_remote_use(state);
1815 		break;
1816 	case VCHIQ_MSG_REMOTE_RELEASE:
1817 		vchiq_on_remote_release(state);
1818 		break;
1819 	case VCHIQ_MSG_REMOTE_USE_ACTIVE:
1820 		break;
1821 
1822 	default:
1823 		vchiq_log_error(vchiq_core_log_level, "%d: prs invalid msgid %x@%pK,%x",
1824 				state->id, msgid, header, size);
1825 		WARN(1, "invalid message\n");
1826 		break;
1827 	}
1828 
1829 skip_message:
1830 	ret = size;
1831 
1832 bail_not_ready:
1833 	if (service)
1834 		vchiq_service_put(service);
1835 
1836 	return ret;
1837 }
1838 
1839 /* Called by the slot handler thread */
1840 static void
1841 parse_rx_slots(struct vchiq_state *state)
1842 {
1843 	struct vchiq_shared_state *remote = state->remote;
1844 	int tx_pos;
1845 
1846 	DEBUG_INITIALISE(state->local);
1847 
1848 	tx_pos = remote->tx_pos;
1849 
1850 	while (state->rx_pos != tx_pos) {
1851 		struct vchiq_header *header;
1852 		int size;
1853 
1854 		DEBUG_TRACE(PARSE_LINE);
1855 		if (!state->rx_data) {
1856 			int rx_index;
1857 
1858 			WARN_ON(state->rx_pos & VCHIQ_SLOT_MASK);
1859 			rx_index = remote->slot_queue[
1860 				SLOT_QUEUE_INDEX_FROM_POS_MASKED(state->rx_pos)];
1861 			state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
1862 				rx_index);
1863 			state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
1864 
1865 			/*
1866 			 * Initialise use_count to one, and increment
1867 			 * release_count at the end of the slot to avoid
1868 			 * releasing the slot prematurely.
1869 			 */
1870 			state->rx_info->use_count = 1;
1871 			state->rx_info->release_count = 0;
1872 		}
1873 
1874 		header = (struct vchiq_header *)(state->rx_data +
1875 			(state->rx_pos & VCHIQ_SLOT_MASK));
1876 		size = parse_message(state, header);
1877 		if (size < 0)
1878 			return;
1879 
1880 		state->rx_pos += calc_stride(size);
1881 
1882 		DEBUG_TRACE(PARSE_LINE);
1883 		/*
1884 		 * Perform some housekeeping when the end of the slot is
1885 		 * reached.
1886 		 */
1887 		if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
1888 			/* Remove the extra reference count. */
1889 			release_slot(state, state->rx_info, NULL, NULL);
1890 			state->rx_data = NULL;
1891 		}
1892 	}
1893 }
1894 
1895 /**
1896  * handle_poll() - handle service polling and other rare conditions
1897  * @state:  vchiq state struct
1898  *
1899  * Context: Process context
1900  *
1901  * Return:
1902  * * 0        - poll handled successful
1903  * * -EAGAIN  - retry later
1904  */
1905 static int
1906 handle_poll(struct vchiq_state *state)
1907 {
1908 	switch (state->conn_state) {
1909 	case VCHIQ_CONNSTATE_CONNECTED:
1910 		/* Poll the services as requested */
1911 		poll_services(state);
1912 		break;
1913 
1914 	case VCHIQ_CONNSTATE_PAUSING:
1915 		if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0,
1916 				  QMFLAGS_NO_MUTEX_UNLOCK) != VCHIQ_RETRY) {
1917 			vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSE_SENT);
1918 		} else {
1919 			/* Retry later */
1920 			return -EAGAIN;
1921 		}
1922 		break;
1923 
1924 	case VCHIQ_CONNSTATE_RESUMING:
1925 		if (queue_message(state, NULL, MAKE_RESUME, NULL, NULL, 0,
1926 				  QMFLAGS_NO_MUTEX_LOCK) != VCHIQ_RETRY) {
1927 			vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1928 		} else {
1929 			/*
1930 			 * This should really be impossible,
1931 			 * since the PAUSE should have flushed
1932 			 * through outstanding messages.
1933 			 */
1934 			vchiq_log_error(vchiq_core_log_level, "Failed to send RESUME message");
1935 		}
1936 		break;
1937 	default:
1938 		break;
1939 	}
1940 
1941 	return 0;
1942 }
1943 
1944 /* Called by the slot handler thread */
1945 static int
1946 slot_handler_func(void *v)
1947 {
1948 	struct vchiq_state *state = v;
1949 	struct vchiq_shared_state *local = state->local;
1950 
1951 	DEBUG_INITIALISE(local);
1952 
1953 	while (1) {
1954 		DEBUG_COUNT(SLOT_HANDLER_COUNT);
1955 		DEBUG_TRACE(SLOT_HANDLER_LINE);
1956 		remote_event_wait(&state->trigger_event, &local->trigger);
1957 
1958 		rmb();
1959 
1960 		DEBUG_TRACE(SLOT_HANDLER_LINE);
1961 		if (state->poll_needed) {
1962 			state->poll_needed = 0;
1963 
1964 			/*
1965 			 * Handle service polling and other rare conditions here
1966 			 * out of the mainline code
1967 			 */
1968 			if (handle_poll(state) == -EAGAIN)
1969 				state->poll_needed = 1;
1970 		}
1971 
1972 		DEBUG_TRACE(SLOT_HANDLER_LINE);
1973 		parse_rx_slots(state);
1974 	}
1975 	return 0;
1976 }
1977 
1978 /* Called by the recycle thread */
1979 static int
1980 recycle_func(void *v)
1981 {
1982 	struct vchiq_state *state = v;
1983 	struct vchiq_shared_state *local = state->local;
1984 	u32 *found;
1985 	size_t length;
1986 
1987 	length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
1988 
1989 	found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
1990 			      GFP_KERNEL);
1991 	if (!found)
1992 		return -ENOMEM;
1993 
1994 	while (1) {
1995 		remote_event_wait(&state->recycle_event, &local->recycle);
1996 
1997 		process_free_queue(state, found, length);
1998 	}
1999 	return 0;
2000 }
2001 
2002 /* Called by the sync thread */
2003 static int
2004 sync_func(void *v)
2005 {
2006 	struct vchiq_state *state = v;
2007 	struct vchiq_shared_state *local = state->local;
2008 	struct vchiq_header *header =
2009 		(struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2010 			state->remote->slot_sync);
2011 
2012 	while (1) {
2013 		struct vchiq_service *service;
2014 		int msgid, size;
2015 		int type;
2016 		unsigned int localport, remoteport;
2017 
2018 		remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
2019 
2020 		rmb();
2021 
2022 		msgid = header->msgid;
2023 		size = header->size;
2024 		type = VCHIQ_MSG_TYPE(msgid);
2025 		localport = VCHIQ_MSG_DSTPORT(msgid);
2026 		remoteport = VCHIQ_MSG_SRCPORT(msgid);
2027 
2028 		service = find_service_by_port(state, localport);
2029 
2030 		if (!service) {
2031 			vchiq_log_error(vchiq_sync_log_level,
2032 					"%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
2033 					state->id, msg_type_str(type), header,
2034 					remoteport, localport, localport);
2035 			release_message_sync(state, header);
2036 			continue;
2037 		}
2038 
2039 		if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
			int svc_fourcc = service->base.fourcc;

2045 			vchiq_log_trace(vchiq_sync_log_level,
2046 					"Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
2047 					msg_type_str(type), VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
2048 					remoteport, localport, size);
2049 			if (size > 0)
2050 				vchiq_log_dump_mem("Rcvd", 0, header->data, min(16, size));
2051 		}
2052 
2053 		switch (type) {
2054 		case VCHIQ_MSG_OPENACK:
2055 			if (size >= sizeof(struct vchiq_openack_payload)) {
2056 				const struct vchiq_openack_payload *payload =
2057 					(struct vchiq_openack_payload *)
2058 					header->data;
2059 				service->peer_version = payload->version;
2060 			}
2061 			vchiq_log_info(vchiq_sync_log_level, "%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
2062 				       state->id, header, size, remoteport, localport,
2063 				       service->peer_version);
2064 			if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
2065 				service->remoteport = remoteport;
2066 				vchiq_set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC);
2067 				service->sync = 1;
2068 				complete(&service->remove_event);
2069 			}
2070 			release_message_sync(state, header);
2071 			break;
2072 
2073 		case VCHIQ_MSG_DATA:
2074 			vchiq_log_trace(vchiq_sync_log_level, "%d: sf DATA@%pK,%x (%d->%d)",
2075 					state->id, header, size, remoteport, localport);
2076 
2077 			if ((service->remoteport == remoteport) &&
2078 			    (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)) {
2079 				if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
2080 							  NULL) == VCHIQ_RETRY)
2081 					vchiq_log_error(vchiq_sync_log_level,
2082 							"synchronous callback to service %d returns VCHIQ_RETRY",
2083 							localport);
2084 			}
2085 			break;
2086 
2087 		default:
2088 			vchiq_log_error(vchiq_sync_log_level, "%d: sf unexpected msgid %x@%pK,%x",
2089 					state->id, msgid, header, size);
2090 			release_message_sync(state, header);
2091 			break;
2092 		}
2093 
2094 		vchiq_service_put(service);
2095 	}
2096 
2097 	return 0;
2098 }
2099 
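/* Reset a bulk queue so that all of its indices start from zero */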
2100 static void
2101 init_bulk_queue(struct vchiq_bulk_queue *queue)
2102 {
2103 	queue->local_insert = 0;
2104 	queue->remote_insert = 0;
2105 	queue->process = 0;
2106 	queue->remote_notify = 0;
2107 	queue->remove = 0;
2108 }
2109 
2110 inline const char *
2111 get_conn_state_name(enum vchiq_connstate conn_state)
2112 {
2113 	return conn_state_names[conn_state];
2114 }
2115 
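/*
 * Carve the shared memory region into VCHIQ_SLOT_SIZE slots. slot_zero is
 * placed at the first aligned address and the remaining data slots are
 * divided equally between the master and slave sides.
 */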
2116 struct vchiq_slot_zero *
2117 vchiq_init_slots(void *mem_base, int mem_size)
2118 {
2119 	int mem_align =
2120 		(int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
2121 	struct vchiq_slot_zero *slot_zero =
2122 		(struct vchiq_slot_zero *)(mem_base + mem_align);
2123 	int num_slots = (mem_size - mem_align) / VCHIQ_SLOT_SIZE;
2124 	int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
2125 
2126 	check_sizes();
2127 
	/* Ensure there is enough memory to run an absolutely minimal system */
2129 	num_slots -= first_data_slot;
2130 
2131 	if (num_slots < 4) {
2132 		vchiq_log_error(vchiq_core_log_level, "%s - insufficient memory %x bytes",
2133 				__func__, mem_size);
2134 		return NULL;
2135 	}
2136 
2137 	memset(slot_zero, 0, sizeof(struct vchiq_slot_zero));
2138 
2139 	slot_zero->magic = VCHIQ_MAGIC;
2140 	slot_zero->version = VCHIQ_VERSION;
2141 	slot_zero->version_min = VCHIQ_VERSION_MIN;
2142 	slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
2143 	slot_zero->slot_size = VCHIQ_SLOT_SIZE;
2144 	slot_zero->max_slots = VCHIQ_MAX_SLOTS;
2145 	slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
2146 
2147 	slot_zero->master.slot_sync = first_data_slot;
2148 	slot_zero->master.slot_first = first_data_slot + 1;
2149 	slot_zero->master.slot_last = first_data_slot + (num_slots / 2) - 1;
2150 	slot_zero->slave.slot_sync = first_data_slot + (num_slots / 2);
2151 	slot_zero->slave.slot_first = first_data_slot + (num_slots / 2) + 1;
2152 	slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
2153 
2154 	return slot_zero;
2155 }
2156 
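/*
 * Initialise the local vchiq state against a prepared slot_zero, create the
 * slot handler, recycle and sync threads, and finally mark the local shared
 * state as initialised so the remote side knows we are ready.
 */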
2157 int
2158 vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
2159 {
2160 	struct vchiq_shared_state *local;
2161 	struct vchiq_shared_state *remote;
2162 	char threadname[16];
2163 	int i, ret;
2164 
2165 	if (vchiq_states[0]) {
2166 		pr_err("%s: VCHIQ state already initialized\n", __func__);
2167 		return -EINVAL;
2168 	}
2169 
2170 	local = &slot_zero->slave;
2171 	remote = &slot_zero->master;
2172 
2173 	if (local->initialised) {
2174 		vchiq_loud_error_header();
2175 		if (remote->initialised)
2176 			vchiq_loud_error("local state has already been initialised");
2177 		else
2178 			vchiq_loud_error("master/slave mismatch two slaves");
2179 		vchiq_loud_error_footer();
2180 		return -EINVAL;
2181 	}
2182 
2183 	memset(state, 0, sizeof(struct vchiq_state));
2184 
2185 	/*
2186 	 * initialize shared state pointers
2187 	 */
2188 
2189 	state->local = local;
2190 	state->remote = remote;
2191 	state->slot_data = (struct vchiq_slot *)slot_zero;
2192 
2193 	/*
2194 	 * initialize events and mutexes
2195 	 */
2196 
2197 	init_completion(&state->connect);
2198 	mutex_init(&state->mutex);
2199 	mutex_init(&state->slot_mutex);
2200 	mutex_init(&state->recycle_mutex);
2201 	mutex_init(&state->sync_mutex);
2202 	mutex_init(&state->bulk_transfer_mutex);
2203 
2204 	init_completion(&state->slot_available_event);
2205 	init_completion(&state->slot_remove_event);
2206 	init_completion(&state->data_quota_event);
2207 
2208 	state->slot_queue_available = 0;
2209 
2210 	for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
		struct vchiq_service_quota *quota = &state->service_quotas[i];

		init_completion(&quota->quota_event);
2213 	}
2214 
2215 	for (i = local->slot_first; i <= local->slot_last; i++) {
2216 		local->slot_queue[state->slot_queue_available] = i;
2217 		state->slot_queue_available++;
2218 		complete(&state->slot_available_event);
2219 	}
2220 
2221 	state->default_slot_quota = state->slot_queue_available / 2;
2222 	state->default_message_quota =
2223 		min((unsigned short)(state->default_slot_quota * 256),
2224 		    (unsigned short)~0);
2225 
2226 	state->previous_data_index = -1;
2227 	state->data_use_count = 0;
2228 	state->data_quota = state->slot_queue_available - 1;
2229 
2230 	remote_event_create(&state->trigger_event, &local->trigger);
2231 	local->tx_pos = 0;
2232 	remote_event_create(&state->recycle_event, &local->recycle);
2233 	local->slot_queue_recycle = state->slot_queue_available;
2234 	remote_event_create(&state->sync_trigger_event, &local->sync_trigger);
2235 	remote_event_create(&state->sync_release_event, &local->sync_release);
2236 
2237 	/* At start-of-day, the slot is empty and available */
2238 	((struct vchiq_header *)
2239 		SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid =
2240 							VCHIQ_MSGID_PADDING;
2241 	remote_event_signal_local(&state->sync_release_event, &local->sync_release);
2242 
2243 	local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
2244 
2245 	ret = vchiq_platform_init_state(state);
2246 	if (ret)
2247 		return ret;
2248 
2249 	/*
2250 	 * bring up slot handler thread
2251 	 */
2252 	snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id);
2253 	state->slot_handler_thread = kthread_create(&slot_handler_func, (void *)state, threadname);
2254 
2255 	if (IS_ERR(state->slot_handler_thread)) {
2256 		vchiq_loud_error_header();
2257 		vchiq_loud_error("couldn't create thread %s", threadname);
2258 		vchiq_loud_error_footer();
2259 		return PTR_ERR(state->slot_handler_thread);
2260 	}
2261 	set_user_nice(state->slot_handler_thread, -19);
2262 
2263 	snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
2264 	state->recycle_thread = kthread_create(&recycle_func, (void *)state, threadname);
2265 	if (IS_ERR(state->recycle_thread)) {
2266 		vchiq_loud_error_header();
2267 		vchiq_loud_error("couldn't create thread %s", threadname);
2268 		vchiq_loud_error_footer();
2269 		ret = PTR_ERR(state->recycle_thread);
2270 		goto fail_free_handler_thread;
2271 	}
2272 	set_user_nice(state->recycle_thread, -19);
2273 
2274 	snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
2275 	state->sync_thread = kthread_create(&sync_func, (void *)state, threadname);
2276 	if (IS_ERR(state->sync_thread)) {
2277 		vchiq_loud_error_header();
2278 		vchiq_loud_error("couldn't create thread %s", threadname);
2279 		vchiq_loud_error_footer();
2280 		ret = PTR_ERR(state->sync_thread);
2281 		goto fail_free_recycle_thread;
2282 	}
2283 	set_user_nice(state->sync_thread, -20);
2284 
2285 	wake_up_process(state->slot_handler_thread);
2286 	wake_up_process(state->recycle_thread);
2287 	wake_up_process(state->sync_thread);
2288 
2289 	vchiq_states[0] = state;
2290 
2291 	/* Indicate readiness to the other side */
2292 	local->initialised = 1;
2293 
2294 	return 0;
2295 
2296 fail_free_recycle_thread:
2297 	kthread_stop(state->recycle_thread);
2298 fail_free_handler_thread:
2299 	kthread_stop(state->slot_handler_thread);
2300 
2301 	return ret;
2302 }
2303 
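/*
 * Append a message header to the service's deferred message queue, waiting
 * for space if the queue is currently full.
 */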
2304 void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header)
2305 {
2306 	struct vchiq_service *service = find_service_by_handle(handle);
	int pos;

	if (!service)
		return;

2309 	while (service->msg_queue_write == service->msg_queue_read +
2310 		VCHIQ_MAX_SLOTS) {
2311 		if (wait_for_completion_interruptible(&service->msg_queue_pop))
2312 			flush_signals(current);
2313 	}
2314 
2315 	pos = service->msg_queue_write & (VCHIQ_MAX_SLOTS - 1);
2316 	service->msg_queue_write++;
2317 	service->msg_queue[pos] = header;
2318 
2319 	complete(&service->msg_queue_push);
2320 }
2321 EXPORT_SYMBOL(vchiq_msg_queue_push);
2322 
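/*
 * Remove the oldest header from the service's deferred message queue, or
 * return NULL if the queue is empty.
 */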
2323 struct vchiq_header *vchiq_msg_hold(unsigned int handle)
2324 {
2325 	struct vchiq_service *service = find_service_by_handle(handle);
2326 	struct vchiq_header *header;
	int pos;

	if (!service)
		return NULL;

2329 	if (service->msg_queue_write == service->msg_queue_read)
2330 		return NULL;
2331 
2332 	while (service->msg_queue_write == service->msg_queue_read) {
2333 		if (wait_for_completion_interruptible(&service->msg_queue_push))
2334 			flush_signals(current);
2335 	}
2336 
2337 	pos = service->msg_queue_read & (VCHIQ_MAX_SLOTS - 1);
2338 	service->msg_queue_read++;
2339 	header = service->msg_queue[pos];
2340 
2341 	complete(&service->msg_queue_pop);
2342 
2343 	return header;
2344 }
2345 EXPORT_SYMBOL(vchiq_msg_hold);
2346 
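/* Reject service parameters that lack a callback or a fourcc */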
2347 static int vchiq_validate_params(const struct vchiq_service_params_kernel *params)
2348 {
2349 	if (!params->callback || !params->fourcc) {
2350 		vchiq_loud_error("Can't add service, invalid params\n");
2351 		return -EINVAL;
2352 	}
2353 
2354 	return 0;
2355 }
2356 
2357 /* Called from application thread when a client or server service is created. */
2358 struct vchiq_service *
2359 vchiq_add_service_internal(struct vchiq_state *state,
2360 			   const struct vchiq_service_params_kernel *params,
2361 			   int srvstate, struct vchiq_instance *instance,
2362 			   void (*userdata_term)(void *userdata))
2363 {
2364 	struct vchiq_service *service;
2365 	struct vchiq_service __rcu **pservice = NULL;
2366 	struct vchiq_service_quota *quota;
2367 	int ret;
2368 	int i;
2369 
2370 	ret = vchiq_validate_params(params);
2371 	if (ret)
2372 		return NULL;
2373 
2374 	service = kmalloc(sizeof(*service), GFP_KERNEL);
2375 	if (!service)
2376 		return service;
2377 
2378 	service->base.fourcc   = params->fourcc;
2379 	service->base.callback = params->callback;
2380 	service->base.userdata = params->userdata;
2381 	service->handle        = VCHIQ_SERVICE_HANDLE_INVALID;
2382 	kref_init(&service->ref_count);
2383 	service->srvstate      = VCHIQ_SRVSTATE_FREE;
2384 	service->userdata_term = userdata_term;
2385 	service->localport     = VCHIQ_PORT_FREE;
2386 	service->remoteport    = VCHIQ_PORT_FREE;
2387 
2388 	service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
2389 		VCHIQ_FOURCC_INVALID : params->fourcc;
2390 	service->client_id     = 0;
2391 	service->auto_close    = 1;
2392 	service->sync          = 0;
2393 	service->closing       = 0;
2394 	service->trace         = 0;
2395 	atomic_set(&service->poll_flags, 0);
2396 	service->version       = params->version;
2397 	service->version_min   = params->version_min;
2398 	service->state         = state;
2399 	service->instance      = instance;
2400 	service->service_use_count = 0;
2401 	service->msg_queue_read = 0;
2402 	service->msg_queue_write = 0;
2403 	init_bulk_queue(&service->bulk_tx);
2404 	init_bulk_queue(&service->bulk_rx);
2405 	init_completion(&service->remove_event);
2406 	init_completion(&service->bulk_remove_event);
2407 	init_completion(&service->msg_queue_pop);
2408 	init_completion(&service->msg_queue_push);
2409 	mutex_init(&service->bulk_mutex);
2410 	memset(&service->stats, 0, sizeof(service->stats));
2411 	memset(&service->msg_queue, 0, sizeof(service->msg_queue));
2412 
2413 	/*
2414 	 * Although it is perfectly possible to use a spinlock
2415 	 * to protect the creation of services, it is overkill as it
2416 	 * disables interrupts while the array is searched.
2417 	 * The only danger is of another thread trying to create a
2418 	 * service - service deletion is safe.
2419 	 * Therefore it is preferable to use state->mutex which,
2420 	 * although slower to claim, doesn't block interrupts while
2421 	 * it is held.
2422 	 */
2423 
2424 	mutex_lock(&state->mutex);
2425 
2426 	/* Prepare to use a previously unused service */
2427 	if (state->unused_service < VCHIQ_MAX_SERVICES)
2428 		pservice = &state->services[state->unused_service];
2429 
2430 	if (srvstate == VCHIQ_SRVSTATE_OPENING) {
2431 		for (i = 0; i < state->unused_service; i++) {
2432 			if (!rcu_access_pointer(state->services[i])) {
2433 				pservice = &state->services[i];
2434 				break;
2435 			}
2436 		}
2437 	} else {
2438 		rcu_read_lock();
2439 		for (i = (state->unused_service - 1); i >= 0; i--) {
2440 			struct vchiq_service *srv;
2441 
2442 			srv = rcu_dereference(state->services[i]);
2443 			if (!srv) {
2444 				pservice = &state->services[i];
2445 			} else if ((srv->public_fourcc == params->fourcc) &&
2446 				   ((srv->instance != instance) ||
2447 				   (srv->base.callback != params->callback))) {
2448 				/*
2449 				 * There is another server using this
2450 				 * fourcc which doesn't match.
2451 				 */
2452 				pservice = NULL;
2453 				break;
2454 			}
2455 		}
2456 		rcu_read_unlock();
2457 	}
2458 
2459 	if (pservice) {
2460 		service->localport = (pservice - state->services);
		if (!handle_seq)
			handle_seq = VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
2464 		service->handle = handle_seq |
2465 			(state->id * VCHIQ_MAX_SERVICES) |
2466 			service->localport;
2467 		handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
2468 		rcu_assign_pointer(*pservice, service);
2469 		if (pservice == &state->services[state->unused_service])
2470 			state->unused_service++;
2471 	}
2472 
2473 	mutex_unlock(&state->mutex);
2474 
2475 	if (!pservice) {
2476 		kfree(service);
2477 		return NULL;
2478 	}
2479 
2480 	quota = &state->service_quotas[service->localport];
2481 	quota->slot_quota = state->default_slot_quota;
2482 	quota->message_quota = state->default_message_quota;
2483 	if (quota->slot_use_count == 0)
2484 		quota->previous_tx_index =
2485 			SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
2486 			- 1;
2487 
2488 	/* Bring this service online */
2489 	vchiq_set_service_state(service, srvstate);
2490 
2491 	vchiq_log_info(vchiq_core_msg_log_level, "%s Service %c%c%c%c SrcPort:%d",
2492 		       (srvstate == VCHIQ_SRVSTATE_OPENING) ? "Open" : "Add",
2493 		       VCHIQ_FOURCC_AS_4CHARS(params->fourcc), service->localport);
2494 
2495 	/* Don't unlock the service - leave it with a ref_count of 1. */
2496 
2497 	return service;
2498 }
2499 
2500 enum vchiq_status
2501 vchiq_open_service_internal(struct vchiq_service *service, int client_id)
2502 {
2503 	struct vchiq_open_payload payload = {
2504 		service->base.fourcc,
2505 		client_id,
2506 		service->version,
2507 		service->version_min
2508 	};
2509 	enum vchiq_status status = VCHIQ_SUCCESS;
2510 
2511 	service->client_id = client_id;
2512 	vchiq_use_service_internal(service);
2513 	status = queue_message(service->state,
2514 			       NULL, MAKE_OPEN(service->localport),
2515 			       memcpy_copy_callback,
2516 			       &payload,
2517 			       sizeof(payload),
2518 			       QMFLAGS_IS_BLOCKING);
2519 
2520 	if (status != VCHIQ_SUCCESS)
2521 		return status;
2522 
2523 	/* Wait for the ACK/NAK */
2524 	if (wait_for_completion_interruptible(&service->remove_event)) {
2525 		status = VCHIQ_RETRY;
2526 		vchiq_release_service_internal(service);
2527 	} else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
2528 		   (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
2529 		if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
2530 			vchiq_log_error(vchiq_core_log_level,
2531 					"%d: osi - srvstate = %s (ref %u)",
2532 					service->state->id,
2533 					srvstate_names[service->srvstate],
2534 					kref_read(&service->ref_count));
2535 		status = VCHIQ_ERROR;
2536 		VCHIQ_SERVICE_STATS_INC(service, error_count);
2537 		vchiq_release_service_internal(service);
2538 	}
2539 
2540 	return status;
2541 }
2542 
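/*
 * Release any claimed messages still addressed to this service so that the
 * slots holding them can be recycled.
 */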
2543 static void
2544 release_service_messages(struct vchiq_service *service)
2545 {
2546 	struct vchiq_state *state = service->state;
2547 	int slot_last = state->remote->slot_last;
2548 	int i;
2549 
2550 	/* Release any claimed messages aimed at this service */
2551 
2552 	if (service->sync) {
2553 		struct vchiq_header *header =
2554 			(struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2555 						state->remote->slot_sync);
2556 		if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
2557 			release_message_sync(state, header);
2558 
2559 		return;
2560 	}
2561 
2562 	for (i = state->remote->slot_first; i <= slot_last; i++) {
2563 		struct vchiq_slot_info *slot_info =
2564 			SLOT_INFO_FROM_INDEX(state, i);
2565 		unsigned int pos, end;
2566 		char *data;
2567 
2568 		if (slot_info->release_count == slot_info->use_count)
2569 			continue;
2570 
2571 		data = (char *)SLOT_DATA_FROM_INDEX(state, i);
2572 		end = VCHIQ_SLOT_SIZE;
2573 		if (data == state->rx_data)
2574 			/*
2575 			 * This buffer is still being read from - stop
2576 			 * at the current read position
2577 			 */
2578 			end = state->rx_pos & VCHIQ_SLOT_MASK;
2579 
2580 		pos = 0;
2581 
2582 		while (pos < end) {
2583 			struct vchiq_header *header =
2584 				(struct vchiq_header *)(data + pos);
2585 			int msgid = header->msgid;
2586 			int port = VCHIQ_MSG_DSTPORT(msgid);
2587 
2588 			if ((port == service->localport) && (msgid & VCHIQ_MSGID_CLAIMED)) {
2589 				vchiq_log_info(vchiq_core_log_level, "  fsi - hdr %pK", header);
2590 				release_slot(state, slot_info, header, NULL);
2591 			}
2592 			pos += calc_stride(header->size);
2593 			if (pos > VCHIQ_SLOT_SIZE) {
2594 				vchiq_log_error(vchiq_core_log_level,
2595 						"fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
2596 						pos, header, msgid, header->msgid, header->size);
2597 				WARN(1, "invalid slot position\n");
2598 			}
2599 		}
2600 	}
2601 }
2602 
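/*
 * Abort and notify all outstanding bulk transfers on a service. Returns 1 on
 * success, or 0 if the caller should retry.
 */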
2603 static int
2604 do_abort_bulks(struct vchiq_service *service)
2605 {
2606 	enum vchiq_status status;
2607 
2608 	/* Abort any outstanding bulk transfers */
2609 	if (mutex_lock_killable(&service->bulk_mutex))
2610 		return 0;
2611 	abort_outstanding_bulks(service, &service->bulk_tx);
2612 	abort_outstanding_bulks(service, &service->bulk_rx);
2613 	mutex_unlock(&service->bulk_mutex);
2614 
2615 	status = notify_bulks(service, &service->bulk_tx, NO_RETRY_POLL);
2616 	if (status != VCHIQ_SUCCESS)
2617 		return 0;
2618 
2619 	status = notify_bulks(service, &service->bulk_rx, NO_RETRY_POLL);
2620 	return (status == VCHIQ_SUCCESS);
2621 }
2622 
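/*
 * Complete the closure of a service: move it to its final state, deliver the
 * VCHIQ_SERVICE_CLOSED callback and drop any remaining use counts. If the
 * callback asks for a retry, the service is parked in failstate instead.
 */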
2623 static enum vchiq_status
2624 close_service_complete(struct vchiq_service *service, int failstate)
2625 {
2626 	enum vchiq_status status;
2627 	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2628 	int newstate;
2629 
2630 	switch (service->srvstate) {
2631 	case VCHIQ_SRVSTATE_OPEN:
2632 	case VCHIQ_SRVSTATE_CLOSESENT:
2633 	case VCHIQ_SRVSTATE_CLOSERECVD:
2634 		if (is_server) {
2635 			if (service->auto_close) {
2636 				service->client_id = 0;
2637 				service->remoteport = VCHIQ_PORT_FREE;
2638 				newstate = VCHIQ_SRVSTATE_LISTENING;
2639 			} else {
2640 				newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
2641 			}
2642 		} else {
2643 			newstate = VCHIQ_SRVSTATE_CLOSED;
2644 		}
2645 		vchiq_set_service_state(service, newstate);
2646 		break;
2647 	case VCHIQ_SRVSTATE_LISTENING:
2648 		break;
2649 	default:
2650 		vchiq_log_error(vchiq_core_log_level, "%s(%x) called in state %s", __func__,
2651 				service->handle, srvstate_names[service->srvstate]);
2652 		WARN(1, "%s in unexpected state\n", __func__);
2653 		return VCHIQ_ERROR;
2654 	}
2655 
2656 	status = make_service_callback(service, VCHIQ_SERVICE_CLOSED, NULL, NULL);
2657 
2658 	if (status != VCHIQ_RETRY) {
2659 		int uc = service->service_use_count;
2660 		int i;
2661 		/* Complete the close process */
2662 		for (i = 0; i < uc; i++)
2663 			/*
2664 			 * cater for cases where close is forced and the
			 * client may not close all its handles
2666 			 */
2667 			vchiq_release_service_internal(service);
2668 
2669 		service->client_id = 0;
2670 		service->remoteport = VCHIQ_PORT_FREE;
2671 
2672 		if (service->srvstate == VCHIQ_SRVSTATE_CLOSED) {
2673 			vchiq_free_service_internal(service);
2674 		} else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
2675 			if (is_server)
2676 				service->closing = 0;
2677 
2678 			complete(&service->remove_event);
2679 		}
2680 	} else {
2681 		vchiq_set_service_state(service, failstate);
2682 	}
2683 
2684 	return status;
2685 }
2686 
2687 /* Called by the slot handler */
2688 enum vchiq_status
2689 vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
2690 {
2691 	struct vchiq_state *state = service->state;
2692 	enum vchiq_status status = VCHIQ_SUCCESS;
2693 	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2694 	int close_id = MAKE_CLOSE(service->localport,
2695 				  VCHIQ_MSG_DSTPORT(service->remoteport));
2696 
2697 	vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)", service->state->id,
2698 		       service->localport, close_recvd, srvstate_names[service->srvstate]);
2699 
2700 	switch (service->srvstate) {
2701 	case VCHIQ_SRVSTATE_CLOSED:
2702 	case VCHIQ_SRVSTATE_HIDDEN:
2703 	case VCHIQ_SRVSTATE_LISTENING:
2704 	case VCHIQ_SRVSTATE_CLOSEWAIT:
2705 		if (close_recvd) {
2706 			vchiq_log_error(vchiq_core_log_level, "%s(1) called in state %s",
2707 					__func__, srvstate_names[service->srvstate]);
2708 		} else if (is_server) {
2709 			if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
2710 				status = VCHIQ_ERROR;
2711 			} else {
2712 				service->client_id = 0;
2713 				service->remoteport = VCHIQ_PORT_FREE;
2714 				if (service->srvstate ==
2715 					VCHIQ_SRVSTATE_CLOSEWAIT)
2716 					vchiq_set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
2717 			}
2718 			complete(&service->remove_event);
2719 		} else {
2720 			vchiq_free_service_internal(service);
2721 		}
2722 		break;
2723 	case VCHIQ_SRVSTATE_OPENING:
2724 		if (close_recvd) {
2725 			/* The open was rejected - tell the user */
2726 			vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSEWAIT);
2727 			complete(&service->remove_event);
2728 		} else {
2729 			/* Shutdown mid-open - let the other side know */
2730 			status = queue_message(state, service, close_id, NULL, NULL, 0, 0);
2731 		}
2732 		break;
2733 
2734 	case VCHIQ_SRVSTATE_OPENSYNC:
2735 		mutex_lock(&state->sync_mutex);
2736 		fallthrough;
2737 	case VCHIQ_SRVSTATE_OPEN:
2738 		if (close_recvd) {
2739 			if (!do_abort_bulks(service))
2740 				status = VCHIQ_RETRY;
2741 		}
2742 
2743 		release_service_messages(service);
2744 
2745 		if (status == VCHIQ_SUCCESS)
2746 			status = queue_message(state, service, close_id, NULL,
2747 					       NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
2748 
2749 		if (status != VCHIQ_SUCCESS) {
2750 			if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)
2751 				mutex_unlock(&state->sync_mutex);
2752 			break;
2753 		}
2754 
2755 		if (!close_recvd) {
2756 			/* Change the state while the mutex is still held */
2757 			vchiq_set_service_state(service,
2758 						VCHIQ_SRVSTATE_CLOSESENT);
2759 			mutex_unlock(&state->slot_mutex);
2760 			if (service->sync)
2761 				mutex_unlock(&state->sync_mutex);
2762 			break;
2763 		}
2764 
2765 		/* Change the state while the mutex is still held */
2766 		vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
2767 		mutex_unlock(&state->slot_mutex);
2768 		if (service->sync)
2769 			mutex_unlock(&state->sync_mutex);
2770 
2771 		status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
2772 		break;
2773 
2774 	case VCHIQ_SRVSTATE_CLOSESENT:
2775 		if (!close_recvd)
2776 			/* This happens when a process is killed mid-close */
2777 			break;
2778 
2779 		if (!do_abort_bulks(service)) {
2780 			status = VCHIQ_RETRY;
2781 			break;
2782 		}
2783 
2784 		if (status == VCHIQ_SUCCESS)
2785 			status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
2786 		break;
2787 
2788 	case VCHIQ_SRVSTATE_CLOSERECVD:
2789 		if (!close_recvd && is_server)
2790 			/* Force into LISTENING mode */
2791 			vchiq_set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
2792 		status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
2793 		break;
2794 
2795 	default:
2796 		vchiq_log_error(vchiq_core_log_level, "%s(%d) called in state %s", __func__,
2797 				close_recvd, srvstate_names[service->srvstate]);
2798 		break;
2799 	}
2800 
2801 	return status;
2802 }
2803 
2804 /* Called from the application process upon process death */
2805 void
2806 vchiq_terminate_service_internal(struct vchiq_service *service)
2807 {
2808 	struct vchiq_state *state = service->state;
2809 
2810 	vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)", state->id,
2811 		       service->localport, service->remoteport);
2812 
2813 	mark_service_closing(service);
2814 
2815 	/* Mark the service for removal by the slot handler */
2816 	request_poll(state, service, VCHIQ_POLL_REMOVE);
2817 }
2818 
2819 /* Called from the slot handler */
2820 void
2821 vchiq_free_service_internal(struct vchiq_service *service)
2822 {
2823 	struct vchiq_state *state = service->state;
2824 
2825 	vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)", state->id, service->localport);
2826 
2827 	switch (service->srvstate) {
2828 	case VCHIQ_SRVSTATE_OPENING:
2829 	case VCHIQ_SRVSTATE_CLOSED:
2830 	case VCHIQ_SRVSTATE_HIDDEN:
2831 	case VCHIQ_SRVSTATE_LISTENING:
2832 	case VCHIQ_SRVSTATE_CLOSEWAIT:
2833 		break;
2834 	default:
2835 		vchiq_log_error(vchiq_core_log_level, "%d: fsi - (%d) in state %s", state->id,
2836 				service->localport, srvstate_names[service->srvstate]);
2837 		return;
2838 	}
2839 
2840 	vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
2841 
2842 	complete(&service->remove_event);
2843 
2844 	/* Release the initial lock */
2845 	vchiq_service_put(service);
2846 }
2847 
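/*
 * Move all of this instance's hidden services to LISTENING and, if it hasn't
 * happened already, perform the CONNECT handshake with the remote side.
 */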
2848 enum vchiq_status
2849 vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2850 {
2851 	struct vchiq_service *service;
2852 	int i;
2853 
2854 	/* Find all services registered to this client and enable them. */
2855 	i = 0;
2856 	while ((service = next_service_by_instance(state, instance, &i)) != NULL) {
2857 		if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
2858 			vchiq_set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
2859 		vchiq_service_put(service);
2860 	}
2861 
2862 	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
2863 		if (queue_message(state, NULL, MAKE_CONNECT, NULL, NULL, 0,
2864 				  QMFLAGS_IS_BLOCKING) == VCHIQ_RETRY)
2865 			return VCHIQ_RETRY;
2866 
2867 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
2868 	}
2869 
2870 	if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
2871 		if (wait_for_completion_interruptible(&state->connect))
2872 			return VCHIQ_RETRY;
2873 
2874 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
2875 		complete(&state->connect);
2876 	}
2877 
2878 	return VCHIQ_SUCCESS;
2879 }
2880 
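/* Remove every service registered by this instance */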
2881 void
2882 vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2883 {
2884 	struct vchiq_service *service;
2885 	int i;
2886 
2887 	/* Find all services registered to this client and remove them. */
2888 	i = 0;
2889 	while ((service = next_service_by_instance(state, instance, &i)) != NULL) {
2890 		(void)vchiq_remove_service(service->handle);
2891 		vchiq_service_put(service);
2892 	}
2893 }
2894 
2895 enum vchiq_status
2896 vchiq_close_service(unsigned int handle)
2897 {
2898 	/* Unregister the service */
2899 	struct vchiq_service *service = find_service_by_handle(handle);
2900 	enum vchiq_status status = VCHIQ_SUCCESS;
2901 
2902 	if (!service)
2903 		return VCHIQ_ERROR;
2904 
2905 	vchiq_log_info(vchiq_core_log_level, "%d: close_service:%d",
2906 		       service->state->id, service->localport);
2907 
2908 	if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2909 	    (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2910 	    (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
2911 		vchiq_service_put(service);
2912 		return VCHIQ_ERROR;
2913 	}
2914 
2915 	mark_service_closing(service);
2916 
2917 	if (current == service->state->slot_handler_thread) {
2918 		status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
2919 		WARN_ON(status == VCHIQ_RETRY);
	} else {
		/* Mark the service for termination by the slot handler */
		request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
	}
2924 
2925 	while (1) {
2926 		if (wait_for_completion_interruptible(&service->remove_event)) {
2927 			status = VCHIQ_RETRY;
2928 			break;
2929 		}
2930 
2931 		if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2932 		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2933 		    (service->srvstate == VCHIQ_SRVSTATE_OPEN))
2934 			break;
2935 
2936 		vchiq_log_warning(vchiq_core_log_level,
2937 				  "%d: close_service:%d - waiting in state %s",
2938 				  service->state->id, service->localport,
2939 				  srvstate_names[service->srvstate]);
2940 	}
2941 
2942 	if ((status == VCHIQ_SUCCESS) &&
2943 	    (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
2944 	    (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
2945 		status = VCHIQ_ERROR;
2946 
2947 	vchiq_service_put(service);
2948 
2949 	return status;
2950 }
2951 EXPORT_SYMBOL(vchiq_close_service);
2952 
2953 enum vchiq_status
2954 vchiq_remove_service(unsigned int handle)
2955 {
2956 	/* Unregister the service */
2957 	struct vchiq_service *service = find_service_by_handle(handle);
2958 	enum vchiq_status status = VCHIQ_SUCCESS;
2959 
2960 	if (!service)
2961 		return VCHIQ_ERROR;
2962 
2963 	vchiq_log_info(vchiq_core_log_level, "%d: remove_service:%d",
2964 		       service->state->id, service->localport);
2965 
2966 	if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
2967 		vchiq_service_put(service);
2968 		return VCHIQ_ERROR;
2969 	}
2970 
2971 	mark_service_closing(service);
2972 
2973 	if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
2974 	    (current == service->state->slot_handler_thread)) {
2975 		/*
2976 		 * Make it look like a client, because it must be removed and
2977 		 * not left in the LISTENING state.
2978 		 */
2979 		service->public_fourcc = VCHIQ_FOURCC_INVALID;
2980 
2981 		status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
2982 		WARN_ON(status == VCHIQ_RETRY);
2983 	} else {
2984 		/* Mark the service for removal by the slot handler */
2985 		request_poll(service->state, service, VCHIQ_POLL_REMOVE);
2986 	}
2987 	while (1) {
2988 		if (wait_for_completion_interruptible(&service->remove_event)) {
2989 			status = VCHIQ_RETRY;
2990 			break;
2991 		}
2992 
2993 		if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2994 		    (service->srvstate == VCHIQ_SRVSTATE_OPEN))
2995 			break;
2996 
2997 		vchiq_log_warning(vchiq_core_log_level,
2998 				  "%d: remove_service:%d - waiting in state %s",
2999 				  service->state->id, service->localport,
3000 				  srvstate_names[service->srvstate]);
3001 	}
3002 
3003 	if ((status == VCHIQ_SUCCESS) &&
3004 	    (service->srvstate != VCHIQ_SRVSTATE_FREE))
3005 		status = VCHIQ_ERROR;
3006 
3007 	vchiq_service_put(service);
3008 
3009 	return status;
3010 }
3011 
3012 /*
3013  * This function may be called by kernel threads or user threads.
3014  * User threads may receive VCHIQ_RETRY to indicate that a signal has been
3015  * received and the call should be retried after being returned to user
3016  * context.
3017  * When called in blocking mode, the userdata field points to a bulk_waiter
3018  * structure.
3019  */
3020 enum vchiq_status vchiq_bulk_transfer(unsigned int handle, void *offset, void __user *uoffset,
3021 				      int size, void *userdata, enum vchiq_bulk_mode mode,
3022 				      enum vchiq_bulk_dir dir)
3023 {
3024 	struct vchiq_service *service = find_service_by_handle(handle);
3025 	struct vchiq_bulk_queue *queue;
3026 	struct vchiq_bulk *bulk;
3027 	struct vchiq_state *state;
3028 	struct bulk_waiter *bulk_waiter = NULL;
3029 	const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
3030 	const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
3031 		VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
3032 	enum vchiq_status status = VCHIQ_ERROR;
3033 	int payload[2];
3034 
3035 	if (!service)
3036 		goto error_exit;
3037 
3038 	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3039 		goto error_exit;
3040 
3041 	if (!offset && !uoffset)
3042 		goto error_exit;
3043 
3044 	if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3045 		goto error_exit;
3046 
3047 	switch (mode) {
3048 	case VCHIQ_BULK_MODE_NOCALLBACK:
3049 	case VCHIQ_BULK_MODE_CALLBACK:
3050 		break;
3051 	case VCHIQ_BULK_MODE_BLOCKING:
3052 		bulk_waiter = userdata;
3053 		init_completion(&bulk_waiter->event);
3054 		bulk_waiter->actual = 0;
3055 		bulk_waiter->bulk = NULL;
3056 		break;
3057 	case VCHIQ_BULK_MODE_WAITING:
3058 		bulk_waiter = userdata;
3059 		bulk = bulk_waiter->bulk;
3060 		goto waiting;
3061 	default:
3062 		goto error_exit;
3063 	}
3064 
3065 	state = service->state;
3066 
3067 	queue = (dir == VCHIQ_BULK_TRANSMIT) ?
3068 		&service->bulk_tx : &service->bulk_rx;
3069 
3070 	if (mutex_lock_killable(&service->bulk_mutex)) {
3071 		status = VCHIQ_RETRY;
3072 		goto error_exit;
3073 	}
3074 
3075 	if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
3076 		VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
3077 		do {
3078 			mutex_unlock(&service->bulk_mutex);
3079 			if (wait_for_completion_interruptible(&service->bulk_remove_event)) {
3080 				status = VCHIQ_RETRY;
3081 				goto error_exit;
3082 			}
3083 			if (mutex_lock_killable(&service->bulk_mutex)) {
3084 				status = VCHIQ_RETRY;
3085 				goto error_exit;
3086 			}
3087 		} while (queue->local_insert == queue->remove +
3088 				VCHIQ_NUM_SERVICE_BULKS);
3089 	}
3090 
3091 	bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
3092 
3093 	bulk->mode = mode;
3094 	bulk->dir = dir;
3095 	bulk->userdata = userdata;
3096 	bulk->size = size;
3097 	bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
3098 
3099 	if (vchiq_prepare_bulk_data(bulk, offset, uoffset, size, dir))
3100 		goto unlock_error_exit;
3101 
3102 	wmb();
3103 
3104 	vchiq_log_info(vchiq_core_log_level, "%d: bt (%d->%d) %cx %x@%pad %pK",
3105 		       state->id, service->localport, service->remoteport,
3106 		       dir_char, size, &bulk->data, userdata);
3107 
3108 	/*
3109 	 * The slot mutex must be held when the service is being closed, so
3110 	 * claim it here to ensure that isn't happening
3111 	 */
3112 	if (mutex_lock_killable(&state->slot_mutex)) {
3113 		status = VCHIQ_RETRY;
3114 		goto cancel_bulk_error_exit;
3115 	}
3116 
3117 	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3118 		goto unlock_both_error_exit;
3119 
3120 	payload[0] = lower_32_bits(bulk->data);
3121 	payload[1] = bulk->size;
3122 	status = queue_message(state,
3123 			       NULL,
3124 			       VCHIQ_MAKE_MSG(dir_msgtype,
3125 					      service->localport,
3126 					      service->remoteport),
3127 			       memcpy_copy_callback,
3128 			       &payload,
3129 			       sizeof(payload),
3130 			       QMFLAGS_IS_BLOCKING |
3131 			       QMFLAGS_NO_MUTEX_LOCK |
3132 			       QMFLAGS_NO_MUTEX_UNLOCK);
3133 	if (status != VCHIQ_SUCCESS)
3134 		goto unlock_both_error_exit;
3135 
3136 	queue->local_insert++;
3137 
3138 	mutex_unlock(&state->slot_mutex);
3139 	mutex_unlock(&service->bulk_mutex);
3140 
3141 	vchiq_log_trace(vchiq_core_log_level, "%d: bt:%d %cx li=%x ri=%x p=%x",
3142 			state->id, service->localport, dir_char, queue->local_insert,
3143 			queue->remote_insert, queue->process);
3144 
3145 waiting:
3146 	vchiq_service_put(service);
3147 
3148 	status = VCHIQ_SUCCESS;
3149 
3150 	if (bulk_waiter) {
3151 		bulk_waiter->bulk = bulk;
3152 		if (wait_for_completion_interruptible(&bulk_waiter->event))
3153 			status = VCHIQ_RETRY;
3154 		else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
3155 			status = VCHIQ_ERROR;
3156 	}
3157 
3158 	return status;
3159 
3160 unlock_both_error_exit:
3161 	mutex_unlock(&state->slot_mutex);
3162 cancel_bulk_error_exit:
3163 	vchiq_complete_bulk(bulk);
3164 unlock_error_exit:
3165 	mutex_unlock(&service->bulk_mutex);
3166 
3167 error_exit:
3168 	if (service)
3169 		vchiq_service_put(service);
3170 	return status;
3171 }
3172 
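/*
 * Queue a data message on a service, using copy_callback to gather the
 * payload. Synchronous services are routed through the dedicated sync slot.
 */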
3173 enum vchiq_status
3174 vchiq_queue_message(unsigned int handle,
3175 		    ssize_t (*copy_callback)(void *context, void *dest,
3176 					     size_t offset, size_t maxsize),
3177 		    void *context,
3178 		    size_t size)
3179 {
3180 	struct vchiq_service *service = find_service_by_handle(handle);
3181 	enum vchiq_status status = VCHIQ_ERROR;
3182 	int data_id;
3183 
3184 	if (!service)
3185 		goto error_exit;
3186 
3187 	if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3188 		goto error_exit;
3189 
3190 	if (!size) {
3191 		VCHIQ_SERVICE_STATS_INC(service, error_count);
3192 		goto error_exit;
3193 	}
3194 
3195 	if (size > VCHIQ_MAX_MSG_SIZE) {
3196 		VCHIQ_SERVICE_STATS_INC(service, error_count);
3197 		goto error_exit;
3198 	}
3199 
3200 	data_id = MAKE_DATA(service->localport, service->remoteport);
3201 
3202 	switch (service->srvstate) {
3203 	case VCHIQ_SRVSTATE_OPEN:
3204 		status = queue_message(service->state, service, data_id,
3205 				       copy_callback, context, size, 1);
3206 		break;
3207 	case VCHIQ_SRVSTATE_OPENSYNC:
3208 		status = queue_message_sync(service->state, service, data_id,
3209 					    copy_callback, context, size, 1);
3210 		break;
3211 	default:
3212 		status = VCHIQ_ERROR;
3213 		break;
3214 	}
3215 
3216 error_exit:
3217 	if (service)
3218 		vchiq_service_put(service);
3219 
3220 	return status;
3221 }
3222 
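/*
 * Blocking wrapper around vchiq_queue_message() for in-kernel callers; keeps
 * retrying until the message has been queued.
 */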
3223 int vchiq_queue_kernel_message(unsigned int handle, void *data, unsigned int size)
3224 {
3225 	enum vchiq_status status;
3226 
3227 	while (1) {
3228 		status = vchiq_queue_message(handle, memcpy_copy_callback,
3229 					     data, size);
3230 
3231 		/*
3232 		 * vchiq_queue_message() may return VCHIQ_RETRY, so we need to
3233 		 * implement a retry mechanism since this function is supposed
3234 		 * to block until queued
3235 		 */
3236 		if (status != VCHIQ_RETRY)
3237 			break;
3238 
3239 		msleep(1);
3240 	}
3241 
3242 	return status;
3243 }
3244 EXPORT_SYMBOL(vchiq_queue_kernel_message);
3245 
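/*
 * Return a received message to VCHIQ once the caller has finished with it,
 * releasing its claim on the containing slot.
 */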
3246 void
3247 vchiq_release_message(unsigned int handle,
3248 		      struct vchiq_header *header)
3249 {
3250 	struct vchiq_service *service = find_service_by_handle(handle);
3251 	struct vchiq_shared_state *remote;
3252 	struct vchiq_state *state;
3253 	int slot_index;
3254 
3255 	if (!service)
3256 		return;
3257 
3258 	state = service->state;
3259 	remote = state->remote;
3260 
3261 	slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
3262 
3263 	if ((slot_index >= remote->slot_first) &&
3264 	    (slot_index <= remote->slot_last)) {
3265 		int msgid = header->msgid;
3266 
3267 		if (msgid & VCHIQ_MSGID_CLAIMED) {
3268 			struct vchiq_slot_info *slot_info =
3269 				SLOT_INFO_FROM_INDEX(state, slot_index);
3270 
3271 			release_slot(state, slot_info, header, service);
3272 		}
3273 	} else if (slot_index == remote->slot_sync) {
3274 		release_message_sync(state, header);
3275 	}
3276 
3277 	vchiq_service_put(service);
3278 }
3279 EXPORT_SYMBOL(vchiq_release_message);
3280 
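/* Mark the sync slot as free again and signal the remote side */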
3281 static void
3282 release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
3283 {
3284 	header->msgid = VCHIQ_MSGID_PADDING;
3285 	remote_event_signal(&state->remote->sync_release);
3286 }
3287 
3288 enum vchiq_status
3289 vchiq_get_peer_version(unsigned int handle, short *peer_version)
3290 {
3291 	enum vchiq_status status = VCHIQ_ERROR;
3292 	struct vchiq_service *service = find_service_by_handle(handle);
3293 
3294 	if (!service)
3295 		goto exit;
3296 
3297 	if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3298 		goto exit;
3299 
3300 	if (!peer_version)
3301 		goto exit;
3302 
3303 	*peer_version = service->peer_version;
3304 	status = VCHIQ_SUCCESS;
3305 
3306 exit:
3307 	if (service)
3308 		vchiq_service_put(service);
3309 	return status;
3310 }
3311 EXPORT_SYMBOL(vchiq_get_peer_version);
3312 
3313 void vchiq_get_config(struct vchiq_config *config)
3314 {
3315 	config->max_msg_size           = VCHIQ_MAX_MSG_SIZE;
3316 	config->bulk_threshold         = VCHIQ_MAX_MSG_SIZE;
3317 	config->max_outstanding_bulks  = VCHIQ_NUM_SERVICE_BULKS;
3318 	config->max_services           = VCHIQ_MAX_SERVICES;
3319 	config->version                = VCHIQ_VERSION;
3320 	config->version_min            = VCHIQ_VERSION_MIN;
3321 }
3322 
3323 int
3324 vchiq_set_service_option(unsigned int handle, enum vchiq_service_option option, int value)
3325 {
3326 	struct vchiq_service *service = find_service_by_handle(handle);
3327 	struct vchiq_service_quota *quota;
3328 	int ret = -EINVAL;
3329 
3330 	if (!service)
3331 		return -EINVAL;
3332 
3333 	switch (option) {
3334 	case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
3335 		service->auto_close = value;
3336 		ret = 0;
3337 		break;
3338 
3339 	case VCHIQ_SERVICE_OPTION_SLOT_QUOTA:
3340 		quota = &service->state->service_quotas[service->localport];
3341 		if (value == 0)
3342 			value = service->state->default_slot_quota;
3343 		if ((value >= quota->slot_use_count) &&
3344 		    (value < (unsigned short)~0)) {
3345 			quota->slot_quota = value;
3346 			if ((value >= quota->slot_use_count) &&
3347 			    (quota->message_quota >= quota->message_use_count))
3348 				/*
3349 				 * Signal the service that it may have
3350 				 * dropped below its quota
3351 				 */
3352 				complete(&quota->quota_event);
3353 			ret = 0;
3354 		}
3355 		break;
3356 
3357 	case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA:
3358 		quota = &service->state->service_quotas[service->localport];
3359 		if (value == 0)
3360 			value = service->state->default_message_quota;
3361 		if ((value >= quota->message_use_count) &&
3362 		    (value < (unsigned short)~0)) {
3363 			quota->message_quota = value;
3364 			if ((value >= quota->message_use_count) &&
3365 			    (quota->slot_quota >= quota->slot_use_count))
3366 				/*
3367 				 * Signal the service that it may have
3368 				 * dropped below its quota
3369 				 */
3370 				complete(&quota->quota_event);
3371 			ret = 0;
3372 		}
3373 		break;
3374 
3375 	case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
3376 		if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3377 		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING)) {
3378 			service->sync = value;
3379 			ret = 0;
3380 		}
3381 		break;
3382 
3383 	case VCHIQ_SERVICE_OPTION_TRACE:
3384 		service->trace = value;
3385 		ret = 0;
3386 		break;
3387 
3388 	default:
3389 		break;
3390 	}
3391 	vchiq_service_put(service);
3392 
3393 	return ret;
3394 }
3395 
3396 static int
3397 vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
3398 			struct vchiq_shared_state *shared, const char *label)
3399 {
3400 	static const char *const debug_names[] = {
3401 		"<entries>",
3402 		"SLOT_HANDLER_COUNT",
3403 		"SLOT_HANDLER_LINE",
3404 		"PARSE_LINE",
3405 		"PARSE_HEADER",
3406 		"PARSE_MSGID",
3407 		"AWAIT_COMPLETION_LINE",
3408 		"DEQUEUE_MESSAGE_LINE",
3409 		"SERVICE_CALLBACK_LINE",
3410 		"MSG_QUEUE_FULL_COUNT",
3411 		"COMPLETION_QUEUE_FULL_COUNT"
3412 	};
3413 	int i;
3414 	char buf[80];
3415 	int len;
3416 	int err;
3417 
3418 	len = scnprintf(buf, sizeof(buf), "  %s: slots %d-%d tx_pos=%x recycle=%x",
3419 			label, shared->slot_first, shared->slot_last,
3420 			shared->tx_pos, shared->slot_queue_recycle);
3421 	err = vchiq_dump(dump_context, buf, len + 1);
3422 	if (err)
3423 		return err;
3424 
3425 	len = scnprintf(buf, sizeof(buf), "    Slots claimed:");
3426 	err = vchiq_dump(dump_context, buf, len + 1);
3427 	if (err)
3428 		return err;
3429 
3430 	for (i = shared->slot_first; i <= shared->slot_last; i++) {
3431 		struct vchiq_slot_info slot_info =
3432 						*SLOT_INFO_FROM_INDEX(state, i);
3433 		if (slot_info.use_count != slot_info.release_count) {
3434 			len = scnprintf(buf, sizeof(buf), "      %d: %d/%d", i, slot_info.use_count,
3435 					slot_info.release_count);
3436 			err = vchiq_dump(dump_context, buf, len + 1);
3437 			if (err)
3438 				return err;
3439 		}
3440 	}
3441 
3442 	for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
3443 		len = scnprintf(buf, sizeof(buf), "    DEBUG: %s = %d(%x)",
3444 				debug_names[i], shared->debug[i], shared->debug[i]);
3445 		err = vchiq_dump(dump_context, buf, len + 1);
3446 		if (err)
3447 			return err;
3448 	}
3449 	return 0;
3450 }
3451 
3452 int vchiq_dump_state(void *dump_context, struct vchiq_state *state)
3453 {
3454 	char buf[80];
3455 	int len;
3456 	int i;
3457 	int err;
3458 
3459 	len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id,
3460 			conn_state_names[state->conn_state]);
3461 	err = vchiq_dump(dump_context, buf, len + 1);
3462 	if (err)
3463 		return err;
3464 
3465 	len = scnprintf(buf, sizeof(buf), "  tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
3466 			state->local->tx_pos,
3467 			state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
3468 			state->rx_pos,
3469 			state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
3470 	err = vchiq_dump(dump_context, buf, len + 1);
3471 	if (err)
3472 		return err;
3473 
3474 	len = scnprintf(buf, sizeof(buf), "  Version: %d (min %d)",
3475 			VCHIQ_VERSION, VCHIQ_VERSION_MIN);
3476 	err = vchiq_dump(dump_context, buf, len + 1);
3477 	if (err)
3478 		return err;
3479 
3480 	if (VCHIQ_ENABLE_STATS) {
3481 		len = scnprintf(buf, sizeof(buf),
3482 				"  Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d",
3483 				state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
3484 				state->stats.error_count);
3485 		err = vchiq_dump(dump_context, buf, len + 1);
3486 		if (err)
3487 			return err;
3488 	}
3489 
	len = scnprintf(buf, sizeof(buf),
			"  Slots: %d available (%d data), %d recyclable, %d stalls (%d data)",
			((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
			state->local_tx_pos) / VCHIQ_SLOT_SIZE,
			state->data_quota - state->data_use_count,
			state->local->slot_queue_recycle - state->slot_queue_available,
			state->stats.slot_stalls, state->stats.data_stalls);
	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	err = vchiq_dump_platform_state(dump_context);
	if (err)
		return err;

	err = vchiq_dump_shared_state(dump_context,
				      state,
				      state->local,
				      "Local");
	if (err)
		return err;
	err = vchiq_dump_shared_state(dump_context,
				      state,
				      state->remote,
				      "Remote");
	if (err)
		return err;

	err = vchiq_dump_platform_instances(dump_context);
	if (err)
		return err;

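	/*
	 * Walk every port that has been handed out and dump its service, if
	 * any; the lookup takes a reference which is dropped after dumping.
	 */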
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service = find_service_by_port(state, i);

		if (service) {
			err = vchiq_dump_service_state(dump_context, service);
			vchiq_service_put(service);
			if (err)
				return err;
		}
	}
	return 0;
}

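/*
 * Dump a single service: port numbers, state, reference count and, for
 * services that are in use, the fourcc, quota usage, outstanding bulk
 * transfers and (when statistics are enabled) the per-service counters.
 */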
int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
{
	char buf[80];
	int len;
	int err;
	unsigned int ref_count;

	/* Don't include the reference taken to perform this dump */
	ref_count = kref_read(&service->ref_count) - 1;
	len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
			service->localport, srvstate_names[service->srvstate],
			ref_count);

	if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
		char remoteport[30];
		struct vchiq_service_quota *quota =
			&service->state->service_quotas[service->localport];
		int fourcc = service->base.fourcc;
		int tx_pending, rx_pending;

		if (service->remoteport != VCHIQ_PORT_FREE) {
			int len2 = scnprintf(remoteport, sizeof(remoteport),
					     "%u", service->remoteport);

			if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
				scnprintf(remoteport + len2, sizeof(remoteport) - len2,
					  " (client %x)", service->client_id);
		} else {
			strscpy(remoteport, "n/a", sizeof(remoteport));
		}

		len += scnprintf(buf + len, sizeof(buf) - len,
				 " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
				 VCHIQ_FOURCC_AS_4CHARS(fourcc), remoteport,
				 quota->message_use_count, quota->message_quota,
				 quota->slot_use_count, quota->slot_quota);

		err = vchiq_dump(dump_context, buf, len + 1);
		if (err)
			return err;

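		/*
		 * Pending bulk transfers on each queue are those queued
		 * locally (local_insert) but not yet matched by the remote
		 * insert index (remote_insert).
		 */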
		tx_pending = service->bulk_tx.local_insert -
			service->bulk_tx.remote_insert;

		rx_pending = service->bulk_rx.local_insert -
			service->bulk_rx.remote_insert;

		len = scnprintf(buf, sizeof(buf),
				"  Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)",
				tx_pending,
				tx_pending ?
				service->bulk_tx.bulks[BULK_INDEX(service->bulk_tx.remove)].size :
				0,
				rx_pending,
				rx_pending ?
				service->bulk_rx.bulks[BULK_INDEX(service->bulk_rx.remove)].size :
				0);

		if (VCHIQ_ENABLE_STATS) {
			err = vchiq_dump(dump_context, buf, len + 1);
			if (err)
				return err;

			len = scnprintf(buf, sizeof(buf),
					"  Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
					service->stats.ctrl_tx_count, service->stats.ctrl_tx_bytes,
					service->stats.ctrl_rx_count, service->stats.ctrl_rx_bytes);
			err = vchiq_dump(dump_context, buf, len + 1);
			if (err)
				return err;

			len = scnprintf(buf, sizeof(buf),
					"  Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
					service->stats.bulk_tx_count, service->stats.bulk_tx_bytes,
					service->stats.bulk_rx_count, service->stats.bulk_rx_bytes);
			err = vchiq_dump(dump_context, buf, len + 1);
			if (err)
				return err;

			len = scnprintf(buf, sizeof(buf),
					"  %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors",
					service->stats.quota_stalls, service->stats.slot_stalls,
					service->stats.bulk_stalls,
					service->stats.bulk_aborted_count,
					service->stats.error_count);
		}
	}

	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	if (service->srvstate != VCHIQ_SRVSTATE_FREE)
		err = vchiq_dump_platform_service_state(dump_context, service);
	return err;
}

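/*
 * Wrap a serious error report in a highly visible banner so it stands out
 * in the kernel log; callers emit their message between the header and the
 * footer.
 */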
void
vchiq_loud_error_header(void)
{
	vchiq_log_error(vchiq_core_log_level,
			"============================================================================");
	vchiq_log_error(vchiq_core_log_level,
			"============================================================================");
	vchiq_log_error(vchiq_core_log_level, "=====");
}

void
vchiq_loud_error_footer(void)
{
	vchiq_log_error(vchiq_core_log_level, "=====");
	vchiq_log_error(vchiq_core_log_level,
			"============================================================================");
	vchiq_log_error(vchiq_core_log_level,
			"============================================================================");
}

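/*
 * Queue a REMOTE_USE control message to the peer. While still
 * disconnected the message cannot be sent, so return VCHIQ_RETRY and let
 * the caller try again once connected.
 */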
enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state)
{
	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
		return VCHIQ_RETRY;

	return queue_message(state, NULL, MAKE_REMOTE_USE, NULL, NULL, 0, 0);
}

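/* As above, but queues a REMOTE_USE_ACTIVE message. */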
enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state)
{
	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
		return VCHIQ_RETRY;

	return queue_message(state, NULL, MAKE_REMOTE_USE_ACTIVE,
			     NULL, NULL, 0, 0);
}

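/*
 * Trace-log a memory buffer as a conventional hex dump: 16 bytes per line,
 * hex values followed by their printable ASCII equivalents (anything
 * outside the printable range is shown as '.').
 */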
void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem, size_t num_bytes)
{
	const u8  *mem = void_mem;
	size_t          offset;
	char            line_buf[100];
	char           *s;

	while (num_bytes > 0) {
		s = line_buf;

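		/* Hex column: one "xx " triplet per byte, blank padding past the end. */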
		for (offset = 0; offset < 16; offset++) {
			if (offset < num_bytes)
				s += scnprintf(s, 4, "%02x ", mem[offset]);
			else
				s += scnprintf(s, 4, "   ");
		}

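		/* ASCII column: printable characters verbatim, everything else as '.'. */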
		for (offset = 0; offset < 16; offset++) {
			if (offset < num_bytes) {
				u8 ch = mem[offset];

				if ((ch < ' ') || (ch > '~'))
					ch = '.';
				*s++ = (char)ch;
			}
		}
		*s++ = '\0';

		if (label && (*label != '\0'))
			vchiq_log_trace(VCHIQ_LOG_TRACE, "%s: %08x: %s", label, addr, line_buf);
		else
			vchiq_log_trace(VCHIQ_LOG_TRACE, "%08x: %s", addr, line_buf);

		addr += 16;
		mem += 16;
		if (num_bytes > 16)
			num_bytes -= 16;
		else
			num_bytes = 0;
	}
}
