1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
3 
4 #include <linux/types.h>
5 #include <linux/completion.h>
6 #include <linux/mutex.h>
7 #include <linux/bitops.h>
8 #include <linux/kthread.h>
9 #include <linux/wait.h>
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/kref.h>
13 #include <linux/rcupdate.h>
14 #include <linux/sched/signal.h>
15 
16 #include "vchiq_core.h"
17 
18 #define VCHIQ_SLOT_HANDLER_STACK 8192
19 
20 #define HANDLE_STATE_SHIFT 12
21 
22 #define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
23 #define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
24 #define SLOT_INDEX_FROM_DATA(state, data) \
25 	(((unsigned int)((char *)data - (char *)state->slot_data)) / \
26 	VCHIQ_SLOT_SIZE)
27 #define SLOT_INDEX_FROM_INFO(state, info) \
28 	((unsigned int)(info - state->slot_info))
29 #define SLOT_QUEUE_INDEX_FROM_POS(pos) \
30 	((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
31 
32 #define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))
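
/*
 * BULK_INDEX() maps a free-running bulk queue position onto an entry of the
 * bulks[] ring.  It relies on VCHIQ_NUM_SERVICE_BULKS being a power of two
 * (asserted below) so that the mask behaves as a modulo; e.g. if the ring
 * held four entries, positions 0,1,2,3,4,... would map to 0,1,2,3,0,...
 */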
33 
34 #define SRVTRACE_LEVEL(srv) \
35 	(((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level)
36 #define SRVTRACE_ENABLED(srv, lev) \
37 	(((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev)))
38 
39 struct vchiq_open_payload {
40 	int fourcc;
41 	int client_id;
42 	short version;
43 	short version_min;
44 };
45 
46 struct vchiq_openack_payload {
47 	short version;
48 };
49 
50 enum {
51 	QMFLAGS_IS_BLOCKING     = BIT(0),
52 	QMFLAGS_NO_MUTEX_LOCK   = BIT(1),
53 	QMFLAGS_NO_MUTEX_UNLOCK = BIT(2)
54 };
55 
56 /* we require this for consistency between endpoints */
57 vchiq_static_assert(sizeof(struct vchiq_header) == 8);
58 vchiq_static_assert(IS_POW2(sizeof(struct vchiq_header)));
59 vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
60 vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
61 vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
62 vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
63 
64 /* Run time control of log level, based on KERN_XXX level. */
65 int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
66 int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
67 int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
68 
69 DEFINE_SPINLOCK(bulk_waiter_spinlock);
70 static DEFINE_SPINLOCK(quota_spinlock);
71 
72 struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES];
73 static unsigned int handle_seq;
74 
75 static const char *const srvstate_names[] = {
76 	"FREE",
77 	"HIDDEN",
78 	"LISTENING",
79 	"OPENING",
80 	"OPEN",
81 	"OPENSYNC",
82 	"CLOSESENT",
83 	"CLOSERECVD",
84 	"CLOSEWAIT",
85 	"CLOSED"
86 };
87 
88 static const char *const reason_names[] = {
89 	"SERVICE_OPENED",
90 	"SERVICE_CLOSED",
91 	"MESSAGE_AVAILABLE",
92 	"BULK_TRANSMIT_DONE",
93 	"BULK_RECEIVE_DONE",
94 	"BULK_TRANSMIT_ABORTED",
95 	"BULK_RECEIVE_ABORTED"
96 };
97 
98 static const char *const conn_state_names[] = {
99 	"DISCONNECTED",
100 	"CONNECTING",
101 	"CONNECTED",
102 	"PAUSING",
103 	"PAUSE_SENT",
104 	"PAUSED",
105 	"RESUMING",
106 	"PAUSE_TIMEOUT",
107 	"RESUME_TIMEOUT"
108 };
109 
110 static void
111 release_message_sync(struct vchiq_state *state, struct vchiq_header *header);
112 
113 static const char *msg_type_str(unsigned int msg_type)
114 {
115 	switch (msg_type) {
116 	case VCHIQ_MSG_PADDING:       return "PADDING";
117 	case VCHIQ_MSG_CONNECT:       return "CONNECT";
118 	case VCHIQ_MSG_OPEN:          return "OPEN";
119 	case VCHIQ_MSG_OPENACK:       return "OPENACK";
120 	case VCHIQ_MSG_CLOSE:         return "CLOSE";
121 	case VCHIQ_MSG_DATA:          return "DATA";
122 	case VCHIQ_MSG_BULK_RX:       return "BULK_RX";
123 	case VCHIQ_MSG_BULK_TX:       return "BULK_TX";
124 	case VCHIQ_MSG_BULK_RX_DONE:  return "BULK_RX_DONE";
125 	case VCHIQ_MSG_BULK_TX_DONE:  return "BULK_TX_DONE";
126 	case VCHIQ_MSG_PAUSE:         return "PAUSE";
127 	case VCHIQ_MSG_RESUME:        return "RESUME";
128 	case VCHIQ_MSG_REMOTE_USE:    return "REMOTE_USE";
129 	case VCHIQ_MSG_REMOTE_RELEASE:      return "REMOTE_RELEASE";
130 	case VCHIQ_MSG_REMOTE_USE_ACTIVE:   return "REMOTE_USE_ACTIVE";
131 	}
132 	return "???";
133 }
134 
135 static inline void
136 vchiq_set_service_state(struct vchiq_service *service, int newstate)
137 {
138 	vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
139 		service->state->id, service->localport,
140 		srvstate_names[service->srvstate],
141 		srvstate_names[newstate]);
142 	service->srvstate = newstate;
143 }
144 
145 struct vchiq_service *
146 find_service_by_handle(unsigned int handle)
147 {
148 	struct vchiq_service *service;
149 
150 	rcu_read_lock();
151 	service = handle_to_service(handle);
152 	if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
153 	    service->handle == handle &&
154 	    kref_get_unless_zero(&service->ref_count)) {
155 		service = rcu_pointer_handoff(service);
156 		rcu_read_unlock();
157 		return service;
158 	}
159 	rcu_read_unlock();
160 	vchiq_log_info(vchiq_core_log_level,
161 		       "Invalid service handle 0x%x", handle);
162 	return NULL;
163 }
164 
165 struct vchiq_service *
166 find_service_by_port(struct vchiq_state *state, int localport)
167 {
168 
169 	if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
170 		struct vchiq_service *service;
171 
172 		rcu_read_lock();
173 		service = rcu_dereference(state->services[localport]);
174 		if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
175 		    kref_get_unless_zero(&service->ref_count)) {
176 			service = rcu_pointer_handoff(service);
177 			rcu_read_unlock();
178 			return service;
179 		}
180 		rcu_read_unlock();
181 	}
182 	vchiq_log_info(vchiq_core_log_level,
183 		       "Invalid port %d", localport);
184 	return NULL;
185 }
186 
187 struct vchiq_service *
188 find_service_for_instance(struct vchiq_instance *instance,
189 	unsigned int handle)
190 {
191 	struct vchiq_service *service;
192 
193 	rcu_read_lock();
194 	service = handle_to_service(handle);
195 	if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
196 	    service->handle == handle &&
197 	    service->instance == instance &&
198 	    kref_get_unless_zero(&service->ref_count)) {
199 		service = rcu_pointer_handoff(service);
200 		rcu_read_unlock();
201 		return service;
202 	}
203 	rcu_read_unlock();
204 	vchiq_log_info(vchiq_core_log_level,
205 		       "Invalid service handle 0x%x", handle);
206 	return NULL;
207 }
208 
209 struct vchiq_service *
210 find_closed_service_for_instance(struct vchiq_instance *instance,
211 	unsigned int handle)
212 {
213 	struct vchiq_service *service;
214 
215 	rcu_read_lock();
216 	service = handle_to_service(handle);
217 	if (service &&
218 	    (service->srvstate == VCHIQ_SRVSTATE_FREE ||
219 	     service->srvstate == VCHIQ_SRVSTATE_CLOSED) &&
220 	    service->handle == handle &&
221 	    service->instance == instance &&
222 	    kref_get_unless_zero(&service->ref_count)) {
223 		service = rcu_pointer_handoff(service);
224 		rcu_read_unlock();
225 		return service;
226 	}
227 	rcu_read_unlock();
228 	vchiq_log_info(vchiq_core_log_level,
229 		       "Invalid service handle 0x%x", handle);
	return NULL;
231 }
232 
233 struct vchiq_service *
234 __next_service_by_instance(struct vchiq_state *state,
235 			   struct vchiq_instance *instance,
236 			   int *pidx)
237 {
238 	struct vchiq_service *service = NULL;
239 	int idx = *pidx;
240 
241 	while (idx < state->unused_service) {
242 		struct vchiq_service *srv;
243 
244 		srv = rcu_dereference(state->services[idx++]);
245 		if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE &&
246 		    srv->instance == instance) {
247 			service = srv;
248 			break;
249 		}
250 	}
251 
252 	*pidx = idx;
253 	return service;
254 }
255 
256 struct vchiq_service *
257 next_service_by_instance(struct vchiq_state *state,
258 			 struct vchiq_instance *instance,
259 			 int *pidx)
260 {
261 	struct vchiq_service *service;
262 
263 	rcu_read_lock();
264 	while (1) {
265 		service = __next_service_by_instance(state, instance, pidx);
266 		if (!service)
267 			break;
268 		if (kref_get_unless_zero(&service->ref_count)) {
269 			service = rcu_pointer_handoff(service);
270 			break;
271 		}
272 	}
273 	rcu_read_unlock();
274 	return service;
275 }
276 
277 void
278 lock_service(struct vchiq_service *service)
279 {
280 	if (!service) {
		WARN(1, "%s: service is NULL\n", __func__);
282 		return;
283 	}
284 	kref_get(&service->ref_count);
285 }
286 
287 static void service_release(struct kref *kref)
288 {
289 	struct vchiq_service *service =
290 		container_of(kref, struct vchiq_service, ref_count);
291 	struct vchiq_state *state = service->state;
292 
293 	WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
294 	rcu_assign_pointer(state->services[service->localport], NULL);
295 	if (service->userdata_term)
296 		service->userdata_term(service->base.userdata);
297 	kfree_rcu(service, rcu);
298 }
299 
300 void
301 unlock_service(struct vchiq_service *service)
302 {
303 	if (!service) {
304 		WARN(1, "%s: service is NULL\n", __func__);
305 		return;
306 	}
307 	kref_put(&service->ref_count, service_release);
308 }
309 
310 int
311 vchiq_get_client_id(unsigned int handle)
312 {
313 	struct vchiq_service *service;
314 	int id;
315 
316 	rcu_read_lock();
317 	service = handle_to_service(handle);
318 	id = service ? service->client_id : 0;
319 	rcu_read_unlock();
320 	return id;
321 }
322 
323 void *
324 vchiq_get_service_userdata(unsigned int handle)
325 {
326 	void *userdata;
327 	struct vchiq_service *service;
328 
329 	rcu_read_lock();
330 	service = handle_to_service(handle);
331 	userdata = service ? service->base.userdata : NULL;
332 	rcu_read_unlock();
333 	return userdata;
334 }
335 EXPORT_SYMBOL(vchiq_get_service_userdata);
336 
337 static void
338 mark_service_closing_internal(struct vchiq_service *service, int sh_thread)
339 {
340 	struct vchiq_state *state = service->state;
341 	struct vchiq_service_quota *service_quota;
342 
343 	service->closing = 1;
344 
345 	/* Synchronise with other threads. */
346 	mutex_lock(&state->recycle_mutex);
347 	mutex_unlock(&state->recycle_mutex);
348 	if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
349 		/*
350 		 * If we're pausing then the slot_mutex is held until resume
351 		 * by the slot handler.  Therefore don't try to acquire this
352 		 * mutex if we're the slot handler and in the pause sent state.
353 		 * We don't need to in this case anyway.
354 		 */
355 		mutex_lock(&state->slot_mutex);
356 		mutex_unlock(&state->slot_mutex);
357 	}
358 
359 	/* Unblock any sending thread. */
360 	service_quota = &state->service_quotas[service->localport];
361 	complete(&service_quota->quota_event);
362 }
363 
364 static void
365 mark_service_closing(struct vchiq_service *service)
366 {
367 	mark_service_closing_internal(service, 0);
368 }
369 
370 static inline enum vchiq_status
371 make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
372 		      struct vchiq_header *header, void *bulk_userdata)
373 {
374 	enum vchiq_status status;
375 
376 	vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
377 		service->state->id, service->localport, reason_names[reason],
378 		header, bulk_userdata);
379 	status = service->base.callback(reason, header, service->handle,
380 		bulk_userdata);
381 	if (status == VCHIQ_ERROR) {
382 		vchiq_log_warning(vchiq_core_log_level,
383 			"%d: ignoring ERROR from callback to service %x",
384 			service->state->id, service->handle);
385 		status = VCHIQ_SUCCESS;
386 	}
387 
388 	if (reason != VCHIQ_MESSAGE_AVAILABLE)
389 		vchiq_release_message(service->handle, header);
390 
391 	return status;
392 }
393 
394 inline void
395 vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
396 {
397 	enum vchiq_connstate oldstate = state->conn_state;
398 
399 	vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
400 		conn_state_names[oldstate],
401 		conn_state_names[newstate]);
402 	state->conn_state = newstate;
403 	vchiq_platform_conn_state_changed(state, oldstate, newstate);
404 }
405 
406 static inline void
407 remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
408 {
409 	event->armed = 0;
410 	/*
411 	 * Don't clear the 'fired' flag because it may already have been set
412 	 * by the other side.
413 	 */
414 	init_waitqueue_head(wq);
415 }
416 
417 /*
418  * All the event waiting routines in VCHIQ used a custom semaphore
419  * implementation that filtered most signals. This achieved a behaviour similar
420  * to the "killable" family of functions. While cleaning up this code all the
421  * routines where switched to the "interruptible" family of functions, as the
422  * former was deemed unjustified and the use "killable" set all VCHIQ's
423  * threads in D state.
424  */
425 static inline int
426 remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
427 {
428 	if (!event->fired) {
429 		event->armed = 1;
430 		dsb(sy);
431 		if (wait_event_interruptible(*wq, event->fired)) {
432 			event->armed = 0;
433 			return 0;
434 		}
435 		event->armed = 0;
436 		wmb();
437 	}
438 
439 	event->fired = 0;
440 	return 1;
441 }
442 
443 static inline void
444 remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
445 {
446 	event->fired = 1;
447 	event->armed = 0;
448 	wake_up_all(wq);
449 }
450 
451 static inline void
452 remote_event_poll(wait_queue_head_t *wq, struct remote_event *event)
453 {
454 	if (event->fired && event->armed)
455 		remote_event_signal_local(wq, event);
456 }
457 
458 void
459 remote_event_pollall(struct vchiq_state *state)
460 {
461 	remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger);
462 	remote_event_poll(&state->sync_release_event, &state->local->sync_release);
463 	remote_event_poll(&state->trigger_event, &state->local->trigger);
464 	remote_event_poll(&state->recycle_event, &state->local->recycle);
465 }
466 
467 /*
468  * Round up message sizes so that any space at the end of a slot is always big
469  * enough for a header. This relies on header size being a power of two, which
470  * has been verified earlier by a static assertion.
471  */
472 
473 static inline size_t
474 calc_stride(size_t size)
475 {
476 	/* Allow room for the header */
477 	size += sizeof(struct vchiq_header);
478 
479 	/* Round up */
480 	return (size + sizeof(struct vchiq_header) - 1) &
481 		~(sizeof(struct vchiq_header) - 1);
482 }
483 
484 /* Called by the slot handler thread */
485 static struct vchiq_service *
486 get_listening_service(struct vchiq_state *state, int fourcc)
487 {
488 	int i;
489 
490 	WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
491 
492 	rcu_read_lock();
493 	for (i = 0; i < state->unused_service; i++) {
494 		struct vchiq_service *service;
495 
496 		service = rcu_dereference(state->services[i]);
497 		if (service &&
498 		    service->public_fourcc == fourcc &&
499 		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING ||
500 		     (service->srvstate == VCHIQ_SRVSTATE_OPEN &&
501 		      service->remoteport == VCHIQ_PORT_FREE)) &&
502 		    kref_get_unless_zero(&service->ref_count)) {
503 			service = rcu_pointer_handoff(service);
504 			rcu_read_unlock();
505 			return service;
506 		}
507 	}
508 	rcu_read_unlock();
509 	return NULL;
510 }
511 
512 /* Called by the slot handler thread */
513 static struct vchiq_service *
514 get_connected_service(struct vchiq_state *state, unsigned int port)
515 {
516 	int i;
517 
518 	rcu_read_lock();
519 	for (i = 0; i < state->unused_service; i++) {
520 		struct vchiq_service *service =
521 			rcu_dereference(state->services[i]);
522 
523 		if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN &&
524 		    service->remoteport == port &&
525 		    kref_get_unless_zero(&service->ref_count)) {
526 			service = rcu_pointer_handoff(service);
527 			rcu_read_unlock();
528 			return service;
529 		}
530 	}
531 	rcu_read_unlock();
532 	return NULL;
533 }
534 
535 inline void
536 request_poll(struct vchiq_state *state, struct vchiq_service *service,
537 	     int poll_type)
538 {
539 	u32 value;
540 
541 	if (service) {
542 		do {
543 			value = atomic_read(&service->poll_flags);
544 		} while (atomic_cmpxchg(&service->poll_flags, value,
545 			value | BIT(poll_type)) != value);
546 
		do {
			value = atomic_read(&state->poll_services[
				service->localport >> 5]);
		} while (atomic_cmpxchg(
			&state->poll_services[service->localport >> 5],
			value, value | BIT(service->localport & 0x1f))
			!= value);
554 	}
555 
556 	state->poll_needed = 1;
557 	wmb();
558 
559 	/* ... and ensure the slot handler runs. */
560 	remote_event_signal_local(&state->trigger_event, &state->local->trigger);
561 }
562 
563 /*
564  * Called from queue_message, by the slot handler and application threads,
565  * with slot_mutex held
566  */
567 static struct vchiq_header *
568 reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
569 {
570 	struct vchiq_shared_state *local = state->local;
571 	int tx_pos = state->local_tx_pos;
572 	int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
573 
574 	if (space > slot_space) {
575 		struct vchiq_header *header;
576 		/* Fill the remaining space with padding */
577 		WARN_ON(!state->tx_data);
578 		header = (struct vchiq_header *)
579 			(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
580 		header->msgid = VCHIQ_MSGID_PADDING;
581 		header->size = slot_space - sizeof(struct vchiq_header);
582 
583 		tx_pos += slot_space;
584 	}
585 
586 	/* If necessary, get the next slot. */
587 	if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
588 		int slot_index;
589 
590 		/* If there is no free slot... */
591 
592 		if (!try_wait_for_completion(&state->slot_available_event)) {
593 			/* ...wait for one. */
594 
595 			VCHIQ_STATS_INC(state, slot_stalls);
596 
597 			/* But first, flush through the last slot. */
598 			state->local_tx_pos = tx_pos;
599 			local->tx_pos = tx_pos;
600 			remote_event_signal(&state->remote->trigger);
601 
602 			if (!is_blocking ||
603 				(wait_for_completion_interruptible(
604 				&state->slot_available_event)))
605 				return NULL; /* No space available */
606 		}
607 
608 		if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
609 			complete(&state->slot_available_event);
610 			pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos);
611 			return NULL;
612 		}
613 
614 		slot_index = local->slot_queue[
615 			SLOT_QUEUE_INDEX_FROM_POS(tx_pos) &
616 			VCHIQ_SLOT_QUEUE_MASK];
617 		state->tx_data =
618 			(char *)SLOT_DATA_FROM_INDEX(state, slot_index);
619 	}
620 
621 	state->local_tx_pos = tx_pos + space;
622 
623 	return (struct vchiq_header *)(state->tx_data +
624 						(tx_pos & VCHIQ_SLOT_MASK));
625 }
626 
627 /* Called by the recycle thread. */
628 static void
629 process_free_queue(struct vchiq_state *state, BITSET_T *service_found,
630 		   size_t length)
631 {
632 	struct vchiq_shared_state *local = state->local;
633 	int slot_queue_available;
634 
635 	/*
636 	 * Find slots which have been freed by the other side, and return them
637 	 * to the available queue.
638 	 */
639 	slot_queue_available = state->slot_queue_available;
640 
641 	/*
642 	 * Use a memory barrier to ensure that any state that may have been
643 	 * modified by another thread is not masked by stale prefetched
644 	 * values.
645 	 */
646 	mb();
647 
648 	while (slot_queue_available != local->slot_queue_recycle) {
649 		unsigned int pos;
650 		int slot_index = local->slot_queue[slot_queue_available++ &
651 			VCHIQ_SLOT_QUEUE_MASK];
652 		char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
653 		int data_found = 0;
654 
655 		/*
656 		 * Beware of the address dependency - data is calculated
657 		 * using an index written by the other side.
658 		 */
659 		rmb();
660 
661 		vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x",
662 			state->id, slot_index, data,
663 			local->slot_queue_recycle, slot_queue_available);
664 
665 		/* Initialise the bitmask for services which have used this slot */
666 		memset(service_found, 0, length);
667 
668 		pos = 0;
669 
670 		while (pos < VCHIQ_SLOT_SIZE) {
671 			struct vchiq_header *header =
672 				(struct vchiq_header *)(data + pos);
673 			int msgid = header->msgid;
674 
675 			if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
676 				int port = VCHIQ_MSG_SRCPORT(msgid);
677 				struct vchiq_service_quota *service_quota =
678 					&state->service_quotas[port];
679 				int count;
680 
681 				spin_lock(&quota_spinlock);
682 				count = service_quota->message_use_count;
683 				if (count > 0)
684 					service_quota->message_use_count =
685 						count - 1;
686 				spin_unlock(&quota_spinlock);
687 
688 				if (count == service_quota->message_quota)
689 					/*
690 					 * Signal the service that it
691 					 * has dropped below its quota
692 					 */
693 					complete(&service_quota->quota_event);
694 				else if (count == 0) {
695 					vchiq_log_error(vchiq_core_log_level,
696 						"service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
697 						port,
698 						service_quota->message_use_count,
699 						header, msgid, header->msgid,
700 						header->size);
701 					WARN(1, "invalid message use count\n");
702 				}
703 				if (!BITSET_IS_SET(service_found, port)) {
704 					/* Set the found bit for this service */
705 					BITSET_SET(service_found, port);
706 
707 					spin_lock(&quota_spinlock);
708 					count = service_quota->slot_use_count;
709 					if (count > 0)
710 						service_quota->slot_use_count =
711 							count - 1;
712 					spin_unlock(&quota_spinlock);
713 
714 					if (count > 0) {
715 						/*
716 						 * Signal the service in case
717 						 * it has dropped below its quota
718 						 */
719 						complete(&service_quota->quota_event);
720 						vchiq_log_trace(
721 							vchiq_core_log_level,
722 							"%d: pfq:%d %x@%pK - slot_use->%d",
723 							state->id, port,
724 							header->size, header,
725 							count - 1);
726 					} else {
727 						vchiq_log_error(
728 							vchiq_core_log_level,
729 								"service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
730 							port, count, header,
731 							msgid, header->msgid,
732 							header->size);
733 						WARN(1, "bad slot use count\n");
734 					}
735 				}
736 
737 				data_found = 1;
738 			}
739 
740 			pos += calc_stride(header->size);
741 			if (pos > VCHIQ_SLOT_SIZE) {
742 				vchiq_log_error(vchiq_core_log_level,
743 					"pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
744 					pos, header, msgid, header->msgid,
745 					header->size);
746 				WARN(1, "invalid slot position\n");
747 			}
748 		}
749 
750 		if (data_found) {
751 			int count;
752 
753 			spin_lock(&quota_spinlock);
754 			count = state->data_use_count;
755 			if (count > 0)
756 				state->data_use_count =
757 					count - 1;
758 			spin_unlock(&quota_spinlock);
759 			if (count == state->data_quota)
760 				complete(&state->data_quota_event);
761 		}
762 
763 		/*
764 		 * Don't allow the slot to be reused until we are no
765 		 * longer interested in it.
766 		 */
767 		mb();
768 
769 		state->slot_queue_available = slot_queue_available;
770 		complete(&state->slot_available_event);
771 	}
772 }
773 
774 static ssize_t
775 memcpy_copy_callback(
776 	void *context, void *dest,
777 	size_t offset, size_t maxsize)
778 {
779 	memcpy(dest + offset, context + offset, maxsize);
780 	return maxsize;
781 }
782 
783 static ssize_t
784 copy_message_data(
785 	ssize_t (*copy_callback)(void *context, void *dest,
786 				 size_t offset, size_t maxsize),
787 	void *context,
788 	void *dest,
789 	size_t size)
790 {
791 	size_t pos = 0;
792 
793 	while (pos < size) {
794 		ssize_t callback_result;
795 		size_t max_bytes = size - pos;
796 
797 		callback_result =
798 			copy_callback(context, dest + pos,
799 				      pos, max_bytes);
800 
801 		if (callback_result < 0)
802 			return callback_result;
803 
804 		if (!callback_result)
805 			return -EIO;
806 
807 		if (callback_result > max_bytes)
808 			return -EIO;
809 
810 		pos += callback_result;
811 	}
812 
813 	return size;
814 }
815 
816 /* Called by the slot handler and application threads */
817 static enum vchiq_status
818 queue_message(struct vchiq_state *state, struct vchiq_service *service,
819 	      int msgid,
820 	      ssize_t (*copy_callback)(void *context, void *dest,
821 				       size_t offset, size_t maxsize),
822 	      void *context, size_t size, int flags)
823 {
824 	struct vchiq_shared_state *local;
825 	struct vchiq_service_quota *service_quota = NULL;
826 	struct vchiq_header *header;
827 	int type = VCHIQ_MSG_TYPE(msgid);
828 
829 	size_t stride;
830 
831 	local = state->local;
832 
833 	stride = calc_stride(size);
834 
	WARN_ON(stride > VCHIQ_SLOT_SIZE);
836 
837 	if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
838 	    mutex_lock_killable(&state->slot_mutex))
839 		return VCHIQ_RETRY;
840 
841 	if (type == VCHIQ_MSG_DATA) {
842 		int tx_end_index;
843 
844 		if (!service) {
845 			WARN(1, "%s: service is NULL\n", __func__);
846 			mutex_unlock(&state->slot_mutex);
847 			return VCHIQ_ERROR;
848 		}
849 
850 		WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
851 				 QMFLAGS_NO_MUTEX_UNLOCK));
852 
853 		if (service->closing) {
854 			/* The service has been closed */
855 			mutex_unlock(&state->slot_mutex);
856 			return VCHIQ_ERROR;
857 		}
858 
859 		service_quota = &state->service_quotas[service->localport];
860 
861 		spin_lock(&quota_spinlock);
862 
863 		/*
864 		 * Ensure this service doesn't use more than its quota of
865 		 * messages or slots
866 		 */
867 		tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
868 			state->local_tx_pos + stride - 1);
869 
870 		/*
871 		 * Ensure data messages don't use more than their quota of
872 		 * slots
873 		 */
874 		while ((tx_end_index != state->previous_data_index) &&
875 			(state->data_use_count == state->data_quota)) {
876 			VCHIQ_STATS_INC(state, data_stalls);
877 			spin_unlock(&quota_spinlock);
878 			mutex_unlock(&state->slot_mutex);
879 
880 			if (wait_for_completion_interruptible(
881 						&state->data_quota_event))
882 				return VCHIQ_RETRY;
883 
884 			mutex_lock(&state->slot_mutex);
885 			spin_lock(&quota_spinlock);
886 			tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
887 				state->local_tx_pos + stride - 1);
888 			if ((tx_end_index == state->previous_data_index) ||
889 				(state->data_use_count < state->data_quota)) {
890 				/* Pass the signal on to other waiters */
891 				complete(&state->data_quota_event);
892 				break;
893 			}
894 		}
895 
896 		while ((service_quota->message_use_count ==
897 				service_quota->message_quota) ||
898 			((tx_end_index != service_quota->previous_tx_index) &&
899 			(service_quota->slot_use_count ==
900 				service_quota->slot_quota))) {
901 			spin_unlock(&quota_spinlock);
902 			vchiq_log_trace(vchiq_core_log_level,
903 				"%d: qm:%d %s,%zx - quota stall (msg %d, slot %d)",
904 				state->id, service->localport,
905 				msg_type_str(type), size,
906 				service_quota->message_use_count,
907 				service_quota->slot_use_count);
908 			VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
909 			mutex_unlock(&state->slot_mutex);
910 			if (wait_for_completion_interruptible(
911 						&service_quota->quota_event))
912 				return VCHIQ_RETRY;
913 			if (service->closing)
914 				return VCHIQ_ERROR;
915 			if (mutex_lock_killable(&state->slot_mutex))
916 				return VCHIQ_RETRY;
917 			if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
918 				/* The service has been closed */
919 				mutex_unlock(&state->slot_mutex);
920 				return VCHIQ_ERROR;
921 			}
922 			spin_lock(&quota_spinlock);
923 			tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
924 				state->local_tx_pos + stride - 1);
925 		}
926 
927 		spin_unlock(&quota_spinlock);
928 	}
929 
930 	header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING);
931 
932 	if (!header) {
933 		if (service)
934 			VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
935 		/*
936 		 * In the event of a failure, return the mutex to the
937 		 * state it was in
938 		 */
939 		if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
940 			mutex_unlock(&state->slot_mutex);
941 		return VCHIQ_RETRY;
942 	}
943 
944 	if (type == VCHIQ_MSG_DATA) {
945 		ssize_t callback_result;
946 		int tx_end_index;
947 		int slot_use_count;
948 
949 		vchiq_log_info(vchiq_core_log_level,
950 			"%d: qm %s@%pK,%zx (%d->%d)",
951 			state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
952 			header, size, VCHIQ_MSG_SRCPORT(msgid),
953 			VCHIQ_MSG_DSTPORT(msgid));
954 
955 		WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
956 				 QMFLAGS_NO_MUTEX_UNLOCK));
957 
958 		callback_result =
959 			copy_message_data(copy_callback, context,
960 					  header->data, size);
961 
962 		if (callback_result < 0) {
963 			mutex_unlock(&state->slot_mutex);
964 			VCHIQ_SERVICE_STATS_INC(service,
965 						error_count);
966 			return VCHIQ_ERROR;
967 		}
968 
969 		if (SRVTRACE_ENABLED(service,
970 				     VCHIQ_LOG_INFO))
971 			vchiq_log_dump_mem("Sent", 0,
972 					   header->data,
973 					   min((size_t)16,
974 					       (size_t)callback_result));
975 
976 		spin_lock(&quota_spinlock);
977 		service_quota->message_use_count++;
978 
979 		tx_end_index =
980 			SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
981 
982 		/*
983 		 * If this transmission can't fit in the last slot used by any
984 		 * service, the data_use_count must be increased.
985 		 */
986 		if (tx_end_index != state->previous_data_index) {
987 			state->previous_data_index = tx_end_index;
988 			state->data_use_count++;
989 		}
990 
991 		/*
992 		 * If this isn't the same slot last used by this service,
993 		 * the service's slot_use_count must be increased.
994 		 */
995 		if (tx_end_index != service_quota->previous_tx_index) {
996 			service_quota->previous_tx_index = tx_end_index;
997 			slot_use_count = ++service_quota->slot_use_count;
998 		} else {
999 			slot_use_count = 0;
1000 		}
1001 
1002 		spin_unlock(&quota_spinlock);
1003 
1004 		if (slot_use_count)
1005 			vchiq_log_trace(vchiq_core_log_level,
1006 				"%d: qm:%d %s,%zx - slot_use->%d (hdr %p)",
1007 				state->id, service->localport,
1008 				msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
1009 				slot_use_count, header);
1010 
1011 		VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1012 		VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1013 	} else {
1014 		vchiq_log_info(vchiq_core_log_level,
1015 			"%d: qm %s@%pK,%zx (%d->%d)", state->id,
1016 			msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1017 			header, size, VCHIQ_MSG_SRCPORT(msgid),
1018 			VCHIQ_MSG_DSTPORT(msgid));
1019 		if (size != 0) {
1020 			/*
1021 			 * It is assumed for now that this code path
1022 			 * only happens from calls inside this file.
1023 			 *
1024 			 * External callers are through the vchiq_queue_message
1025 			 * path which always sets the type to be VCHIQ_MSG_DATA
1026 			 *
1027 			 * At first glance this appears to be correct but
1028 			 * more review is needed.
1029 			 */
1030 			copy_message_data(copy_callback, context,
1031 					  header->data, size);
1032 		}
1033 		VCHIQ_STATS_INC(state, ctrl_tx_count);
1034 	}
1035 
1036 	header->msgid = msgid;
1037 	header->size = size;
1038 
1039 	{
1040 		int svc_fourcc;
1041 
1042 		svc_fourcc = service
1043 			? service->base.fourcc
1044 			: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1045 
1046 		vchiq_log_info(SRVTRACE_LEVEL(service),
1047 			"Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
1048 			msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1049 			VCHIQ_MSG_TYPE(msgid),
1050 			VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1051 			VCHIQ_MSG_SRCPORT(msgid),
1052 			VCHIQ_MSG_DSTPORT(msgid),
1053 			size);
1054 	}
1055 
1056 	/* Make sure the new header is visible to the peer. */
1057 	wmb();
1058 
1059 	/* Make the new tx_pos visible to the peer. */
1060 	local->tx_pos = state->local_tx_pos;
1061 	wmb();
1062 
1063 	if (service && (type == VCHIQ_MSG_CLOSE))
1064 		vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
1065 
1066 	if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK))
1067 		mutex_unlock(&state->slot_mutex);
1068 
1069 	remote_event_signal(&state->remote->trigger);
1070 
1071 	return VCHIQ_SUCCESS;
1072 }
1073 
1074 /* Called by the slot handler and application threads */
1075 static enum vchiq_status
1076 queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
1077 		   int msgid,
1078 		   ssize_t (*copy_callback)(void *context, void *dest,
1079 					    size_t offset, size_t maxsize),
1080 		   void *context, int size, int is_blocking)
1081 {
1082 	struct vchiq_shared_state *local;
1083 	struct vchiq_header *header;
1084 	ssize_t callback_result;
1085 
1086 	local = state->local;
1087 
1088 	if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME &&
1089 	    mutex_lock_killable(&state->sync_mutex))
1090 		return VCHIQ_RETRY;
1091 
1092 	remote_event_wait(&state->sync_release_event, &local->sync_release);
1093 
1094 	rmb();
1095 
1096 	header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
1097 		local->slot_sync);
1098 
1099 	{
1100 		int oldmsgid = header->msgid;
1101 
1102 		if (oldmsgid != VCHIQ_MSGID_PADDING)
1103 			vchiq_log_error(vchiq_core_log_level,
1104 				"%d: qms - msgid %x, not PADDING",
1105 				state->id, oldmsgid);
1106 	}
1107 
1108 	vchiq_log_info(vchiq_sync_log_level,
1109 		       "%d: qms %s@%pK,%x (%d->%d)", state->id,
1110 		       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1111 		       header, size, VCHIQ_MSG_SRCPORT(msgid),
1112 		       VCHIQ_MSG_DSTPORT(msgid));
1113 
1114 	callback_result =
1115 		copy_message_data(copy_callback, context,
1116 				  header->data, size);
1117 
	if (callback_result < 0) {
		mutex_unlock(&state->sync_mutex);
		VCHIQ_SERVICE_STATS_INC(service, error_count);
		return VCHIQ_ERROR;
	}
1124 
1125 	if (service) {
1126 		if (SRVTRACE_ENABLED(service,
1127 				     VCHIQ_LOG_INFO))
1128 			vchiq_log_dump_mem("Sent", 0,
1129 					   header->data,
1130 					   min((size_t)16,
1131 					       (size_t)callback_result));
1132 
1133 		VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1134 		VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1135 	} else {
1136 		VCHIQ_STATS_INC(state, ctrl_tx_count);
1137 	}
1138 
1139 	header->size = size;
1140 	header->msgid = msgid;
1141 
1142 	if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
1143 		int svc_fourcc;
1144 
1145 		svc_fourcc = service
1146 			? service->base.fourcc
1147 			: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1148 
1149 		vchiq_log_trace(vchiq_sync_log_level,
1150 			"Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
1151 			msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1152 			VCHIQ_MSG_TYPE(msgid),
1153 			VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1154 			VCHIQ_MSG_SRCPORT(msgid),
1155 			VCHIQ_MSG_DSTPORT(msgid),
1156 			size);
1157 	}
1158 
1159 	remote_event_signal(&state->remote->sync_trigger);
1160 
1161 	if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
1162 		mutex_unlock(&state->sync_mutex);
1163 
1164 	return VCHIQ_SUCCESS;
1165 }
1166 
1167 static inline void
1168 claim_slot(struct vchiq_slot_info *slot)
1169 {
1170 	slot->use_count++;
1171 }
1172 
1173 static void
1174 release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
1175 	     struct vchiq_header *header, struct vchiq_service *service)
1176 {
1177 	int release_count;
1178 
1179 	mutex_lock(&state->recycle_mutex);
1180 
1181 	if (header) {
1182 		int msgid = header->msgid;
1183 
1184 		if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
1185 			(service && service->closing)) {
1186 			mutex_unlock(&state->recycle_mutex);
1187 			return;
1188 		}
1189 
1190 		/* Rewrite the message header to prevent a double release */
1191 		header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
1192 	}
1193 
1194 	release_count = slot_info->release_count;
1195 	slot_info->release_count = ++release_count;
1196 
1197 	if (release_count == slot_info->use_count) {
1198 		int slot_queue_recycle;
1199 		/* Add to the freed queue */
1200 
1201 		/*
1202 		 * A read barrier is necessary here to prevent speculative
1203 		 * fetches of remote->slot_queue_recycle from overtaking the
1204 		 * mutex.
1205 		 */
1206 		rmb();
1207 
1208 		slot_queue_recycle = state->remote->slot_queue_recycle;
1209 		state->remote->slot_queue[slot_queue_recycle &
1210 			VCHIQ_SLOT_QUEUE_MASK] =
1211 			SLOT_INDEX_FROM_INFO(state, slot_info);
1212 		state->remote->slot_queue_recycle = slot_queue_recycle + 1;
1213 		vchiq_log_info(vchiq_core_log_level,
1214 			"%d: %s %d - recycle->%x", state->id, __func__,
1215 			SLOT_INDEX_FROM_INFO(state, slot_info),
1216 			state->remote->slot_queue_recycle);
1217 
1218 		/*
1219 		 * A write barrier is necessary, but remote_event_signal
1220 		 * contains one.
1221 		 */
1222 		remote_event_signal(&state->remote->recycle);
1223 	}
1224 
1225 	mutex_unlock(&state->recycle_mutex);
1226 }
1227 
1228 /* Called by the slot handler - don't hold the bulk mutex */
1229 static enum vchiq_status
1230 notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
1231 	     int retry_poll)
1232 {
1233 	enum vchiq_status status = VCHIQ_SUCCESS;
1234 
1235 	vchiq_log_trace(vchiq_core_log_level,
1236 		"%d: nb:%d %cx - p=%x rn=%x r=%x",
1237 		service->state->id, service->localport,
1238 		(queue == &service->bulk_tx) ? 't' : 'r',
1239 		queue->process, queue->remote_notify, queue->remove);
1240 
1241 	queue->remote_notify = queue->process;
1242 
1243 	if (status == VCHIQ_SUCCESS) {
1244 		while (queue->remove != queue->remote_notify) {
1245 			struct vchiq_bulk *bulk =
1246 				&queue->bulks[BULK_INDEX(queue->remove)];
1247 
1248 			/*
1249 			 * Only generate callbacks for non-dummy bulk
1250 			 * requests, and non-terminated services
1251 			 */
1252 			if (bulk->data && service->instance) {
1253 				if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
1254 					if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1255 						VCHIQ_SERVICE_STATS_INC(service,
1256 							bulk_tx_count);
1257 						VCHIQ_SERVICE_STATS_ADD(service,
1258 							bulk_tx_bytes,
1259 							bulk->actual);
1260 					} else {
1261 						VCHIQ_SERVICE_STATS_INC(service,
1262 							bulk_rx_count);
1263 						VCHIQ_SERVICE_STATS_ADD(service,
1264 							bulk_rx_bytes,
1265 							bulk->actual);
1266 					}
1267 				} else {
1268 					VCHIQ_SERVICE_STATS_INC(service,
1269 						bulk_aborted_count);
1270 				}
1271 				if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
1272 					struct bulk_waiter *waiter;
1273 
1274 					spin_lock(&bulk_waiter_spinlock);
1275 					waiter = bulk->userdata;
1276 					if (waiter) {
1277 						waiter->actual = bulk->actual;
1278 						complete(&waiter->event);
1279 					}
1280 					spin_unlock(&bulk_waiter_spinlock);
1281 				} else if (bulk->mode ==
1282 					VCHIQ_BULK_MODE_CALLBACK) {
1283 					enum vchiq_reason reason = (bulk->dir ==
1284 						VCHIQ_BULK_TRANSMIT) ?
1285 						((bulk->actual ==
1286 						VCHIQ_BULK_ACTUAL_ABORTED) ?
1287 						VCHIQ_BULK_TRANSMIT_ABORTED :
1288 						VCHIQ_BULK_TRANSMIT_DONE) :
1289 						((bulk->actual ==
1290 						VCHIQ_BULK_ACTUAL_ABORTED) ?
1291 						VCHIQ_BULK_RECEIVE_ABORTED :
1292 						VCHIQ_BULK_RECEIVE_DONE);
1293 					status = make_service_callback(service,
1294 						reason,	NULL, bulk->userdata);
1295 					if (status == VCHIQ_RETRY)
1296 						break;
1297 				}
1298 			}
1299 
1300 			queue->remove++;
1301 			complete(&service->bulk_remove_event);
1302 		}
1303 		if (!retry_poll)
1304 			status = VCHIQ_SUCCESS;
1305 	}
1306 
1307 	if (status == VCHIQ_RETRY)
1308 		request_poll(service->state, service,
1309 			(queue == &service->bulk_tx) ?
1310 			VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
1311 
1312 	return status;
1313 }
1314 
1315 /* Called by the slot handler thread */
1316 static void
1317 poll_services(struct vchiq_state *state)
1318 {
1319 	int group, i;
1320 
1321 	for (group = 0; group < BITSET_SIZE(state->unused_service); group++) {
1322 		u32 flags;
1323 
1324 		flags = atomic_xchg(&state->poll_services[group], 0);
1325 		for (i = 0; flags; i++) {
1326 			if (flags & BIT(i)) {
1327 				struct vchiq_service *service =
1328 					find_service_by_port(state,
1329 						(group<<5) + i);
1330 				u32 service_flags;
1331 
1332 				flags &= ~BIT(i);
1333 				if (!service)
1334 					continue;
1335 				service_flags =
1336 					atomic_xchg(&service->poll_flags, 0);
1337 				if (service_flags &
1338 					BIT(VCHIQ_POLL_REMOVE)) {
1339 					vchiq_log_info(vchiq_core_log_level,
1340 						"%d: ps - remove %d<->%d",
1341 						state->id, service->localport,
1342 						service->remoteport);
1343 
1344 					/*
1345 					 * Make it look like a client, because
1346 					 * it must be removed and not left in
1347 					 * the LISTENING state.
1348 					 */
1349 					service->public_fourcc =
1350 						VCHIQ_FOURCC_INVALID;
1351 
1352 					if (vchiq_close_service_internal(
1353 						service, 0/*!close_recvd*/) !=
1354 						VCHIQ_SUCCESS)
1355 						request_poll(state, service,
1356 							VCHIQ_POLL_REMOVE);
1357 				} else if (service_flags &
1358 					BIT(VCHIQ_POLL_TERMINATE)) {
1359 					vchiq_log_info(vchiq_core_log_level,
1360 						"%d: ps - terminate %d<->%d",
1361 						state->id, service->localport,
1362 						service->remoteport);
1363 					if (vchiq_close_service_internal(
1364 						service, 0/*!close_recvd*/) !=
1365 						VCHIQ_SUCCESS)
1366 						request_poll(state, service,
1367 							VCHIQ_POLL_TERMINATE);
1368 				}
1369 				if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY))
1370 					notify_bulks(service,
1371 						&service->bulk_tx,
1372 						1/*retry_poll*/);
1373 				if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY))
1374 					notify_bulks(service,
1375 						&service->bulk_rx,
1376 						1/*retry_poll*/);
1377 				unlock_service(service);
1378 			}
1379 		}
1380 	}
1381 }
1382 
1383 /* Called with the bulk_mutex held */
1384 static void
1385 abort_outstanding_bulks(struct vchiq_service *service,
1386 			struct vchiq_bulk_queue *queue)
1387 {
1388 	int is_tx = (queue == &service->bulk_tx);
1389 
1390 	vchiq_log_trace(vchiq_core_log_level,
1391 		"%d: aob:%d %cx - li=%x ri=%x p=%x",
1392 		service->state->id, service->localport, is_tx ? 't' : 'r',
1393 		queue->local_insert, queue->remote_insert, queue->process);
1394 
	WARN_ON((int)(queue->local_insert - queue->process) < 0);
	WARN_ON((int)(queue->remote_insert - queue->process) < 0);
1397 
1398 	while ((queue->process != queue->local_insert) ||
1399 		(queue->process != queue->remote_insert)) {
1400 		struct vchiq_bulk *bulk =
1401 				&queue->bulks[BULK_INDEX(queue->process)];
1402 
1403 		if (queue->process == queue->remote_insert) {
1404 			/* fabricate a matching dummy bulk */
1405 			bulk->remote_data = NULL;
1406 			bulk->remote_size = 0;
1407 			queue->remote_insert++;
1408 		}
1409 
1410 		if (queue->process != queue->local_insert) {
1411 			vchiq_complete_bulk(bulk);
1412 
1413 			vchiq_log_info(SRVTRACE_LEVEL(service),
1414 				"%s %c%c%c%c d:%d ABORTED - tx len:%d, rx len:%d",
1415 				is_tx ? "Send Bulk to" : "Recv Bulk from",
1416 				VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1417 				service->remoteport,
1418 				bulk->size,
1419 				bulk->remote_size);
1420 		} else {
1421 			/* fabricate a matching dummy bulk */
1422 			bulk->data = 0;
1423 			bulk->size = 0;
1424 			bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
1425 			bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
1426 				VCHIQ_BULK_RECEIVE;
1427 			queue->local_insert++;
1428 		}
1429 
1430 		queue->process++;
1431 	}
1432 }
1433 
1434 static int
1435 parse_open(struct vchiq_state *state, struct vchiq_header *header)
1436 {
1437 	struct vchiq_service *service = NULL;
1438 	int msgid, size;
1439 	unsigned int localport, remoteport;
1440 
1441 	msgid = header->msgid;
1442 	size = header->size;
1443 	localport = VCHIQ_MSG_DSTPORT(msgid);
1444 	remoteport = VCHIQ_MSG_SRCPORT(msgid);
1445 	if (size >= sizeof(struct vchiq_open_payload)) {
1446 		const struct vchiq_open_payload *payload =
1447 			(struct vchiq_open_payload *)header->data;
1448 		unsigned int fourcc;
1449 
1450 		fourcc = payload->fourcc;
1451 		vchiq_log_info(vchiq_core_log_level,
1452 			"%d: prs OPEN@%pK (%d->'%c%c%c%c')",
1453 			state->id, header, localport,
1454 			VCHIQ_FOURCC_AS_4CHARS(fourcc));
1455 
1456 		service = get_listening_service(state, fourcc);
1457 
1458 		if (service) {
1459 			/* A matching service exists */
1460 			short version = payload->version;
1461 			short version_min = payload->version_min;
1462 
1463 			if ((service->version < version_min) ||
1464 				(version < service->version_min)) {
1465 				/* Version mismatch */
1466 				vchiq_loud_error_header();
				vchiq_loud_error("%d: service %d (%c%c%c%c) version mismatch - local (%d, min %d) vs. remote (%d, min %d)",
1470 					state->id, service->localport,
1471 					VCHIQ_FOURCC_AS_4CHARS(fourcc),
1472 					service->version, service->version_min,
1473 					version, version_min);
1474 				vchiq_loud_error_footer();
1475 				unlock_service(service);
1476 				service = NULL;
1477 				goto fail_open;
1478 			}
1479 			service->peer_version = version;
1480 
1481 			if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
1482 				struct vchiq_openack_payload ack_payload = {
1483 					service->version
1484 				};
1485 
1486 				if (state->version_common <
1487 				    VCHIQ_VERSION_SYNCHRONOUS_MODE)
1488 					service->sync = 0;
1489 
1490 				/* Acknowledge the OPEN */
1491 				if (service->sync) {
1492 					if (queue_message_sync(
1493 						state,
1494 						NULL,
1495 						VCHIQ_MAKE_MSG(
1496 							VCHIQ_MSG_OPENACK,
1497 							service->localport,
1498 							remoteport),
1499 						memcpy_copy_callback,
1500 						&ack_payload,
1501 						sizeof(ack_payload),
1502 						0) == VCHIQ_RETRY)
1503 						goto bail_not_ready;
1504 				} else {
1505 					if (queue_message(state,
1506 							NULL,
1507 							VCHIQ_MAKE_MSG(
1508 							VCHIQ_MSG_OPENACK,
1509 							service->localport,
1510 							remoteport),
1511 						memcpy_copy_callback,
1512 						&ack_payload,
1513 						sizeof(ack_payload),
1514 						0) == VCHIQ_RETRY)
1515 						goto bail_not_ready;
1516 				}
1517 
1518 				/* The service is now open */
1519 				vchiq_set_service_state(service,
1520 					service->sync ? VCHIQ_SRVSTATE_OPENSYNC
1521 					: VCHIQ_SRVSTATE_OPEN);
1522 			}
1523 
1524 			/* Success - the message has been dealt with */
1525 			unlock_service(service);
1526 			return 1;
1527 		}
1528 	}
1529 
1530 fail_open:
1531 	/* No available service, or an invalid request - send a CLOSE */
1532 	if (queue_message(state, NULL,
1533 		VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
1534 		NULL, NULL, 0, 0) == VCHIQ_RETRY)
1535 		goto bail_not_ready;
1536 
1537 	return 1;
1538 
1539 bail_not_ready:
1540 	if (service)
1541 		unlock_service(service);
1542 
1543 	return 0;
1544 }
1545 
1546 /* Called by the slot handler thread */
1547 static void
1548 parse_rx_slots(struct vchiq_state *state)
1549 {
1550 	struct vchiq_shared_state *remote = state->remote;
1551 	struct vchiq_service *service = NULL;
1552 	int tx_pos;
1553 
1554 	DEBUG_INITIALISE(state->local)
1555 
1556 	tx_pos = remote->tx_pos;
1557 
1558 	while (state->rx_pos != tx_pos) {
1559 		struct vchiq_header *header;
1560 		int msgid, size;
1561 		int type;
1562 		unsigned int localport, remoteport;
1563 
1564 		DEBUG_TRACE(PARSE_LINE);
1565 		if (!state->rx_data) {
1566 			int rx_index;
1567 
			WARN_ON(state->rx_pos & VCHIQ_SLOT_MASK);
1569 			rx_index = remote->slot_queue[
1570 				SLOT_QUEUE_INDEX_FROM_POS(state->rx_pos) &
1571 				VCHIQ_SLOT_QUEUE_MASK];
1572 			state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
1573 				rx_index);
1574 			state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
1575 
1576 			/*
1577 			 * Initialise use_count to one, and increment
1578 			 * release_count at the end of the slot to avoid
1579 			 * releasing the slot prematurely.
1580 			 */
1581 			state->rx_info->use_count = 1;
1582 			state->rx_info->release_count = 0;
1583 		}
1584 
1585 		header = (struct vchiq_header *)(state->rx_data +
1586 			(state->rx_pos & VCHIQ_SLOT_MASK));
1587 		DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
1588 		msgid = header->msgid;
1589 		DEBUG_VALUE(PARSE_MSGID, msgid);
1590 		size = header->size;
1591 		type = VCHIQ_MSG_TYPE(msgid);
1592 		localport = VCHIQ_MSG_DSTPORT(msgid);
1593 		remoteport = VCHIQ_MSG_SRCPORT(msgid);
1594 
1595 		if (type != VCHIQ_MSG_DATA)
1596 			VCHIQ_STATS_INC(state, ctrl_rx_count);
1597 
1598 		switch (type) {
1599 		case VCHIQ_MSG_OPENACK:
1600 		case VCHIQ_MSG_CLOSE:
1601 		case VCHIQ_MSG_DATA:
1602 		case VCHIQ_MSG_BULK_RX:
1603 		case VCHIQ_MSG_BULK_TX:
1604 		case VCHIQ_MSG_BULK_RX_DONE:
1605 		case VCHIQ_MSG_BULK_TX_DONE:
1606 			service = find_service_by_port(state, localport);
1607 			if ((!service ||
1608 			     ((service->remoteport != remoteport) &&
1609 			      (service->remoteport != VCHIQ_PORT_FREE))) &&
1610 			    (localport == 0) &&
1611 			    (type == VCHIQ_MSG_CLOSE)) {
1612 				/*
1613 				 * This could be a CLOSE from a client which
1614 				 * hadn't yet received the OPENACK - look for
1615 				 * the connected service
1616 				 */
1617 				if (service)
1618 					unlock_service(service);
1619 				service = get_connected_service(state,
1620 					remoteport);
1621 				if (service)
1622 					vchiq_log_warning(vchiq_core_log_level,
1623 						"%d: prs %s@%pK (%d->%d) - found connected service %d",
1624 						state->id, msg_type_str(type),
1625 						header, remoteport, localport,
1626 						service->localport);
1627 			}
1628 
1629 			if (!service) {
1630 				vchiq_log_error(vchiq_core_log_level,
1631 					"%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
1632 					state->id, msg_type_str(type),
1633 					header, remoteport, localport,
1634 					localport);
1635 				goto skip_message;
1636 			}
1637 			break;
1638 		default:
1639 			break;
1640 		}
1641 
1642 		if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
1643 			int svc_fourcc;
1644 
1645 			svc_fourcc = service
1646 				? service->base.fourcc
1647 				: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1648 			vchiq_log_info(SRVTRACE_LEVEL(service),
1649 				"Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d len:%d",
1650 				msg_type_str(type), type,
1651 				VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1652 				remoteport, localport, size);
1653 			if (size > 0)
1654 				vchiq_log_dump_mem("Rcvd", 0, header->data,
1655 					min(16, size));
1656 		}
1657 
1658 		if (((unsigned long)header & VCHIQ_SLOT_MASK) +
1659 		    calc_stride(size) > VCHIQ_SLOT_SIZE) {
1660 			vchiq_log_error(vchiq_core_log_level,
1661 				"header %pK (msgid %x) - size %x too big for slot",
1662 				header, (unsigned int)msgid,
1663 				(unsigned int)size);
1664 			WARN(1, "oversized for slot\n");
1665 		}
1666 
1667 		switch (type) {
1668 		case VCHIQ_MSG_OPEN:
			WARN_ON(VCHIQ_MSG_DSTPORT(msgid) != 0);
1670 			if (!parse_open(state, header))
1671 				goto bail_not_ready;
1672 			break;
1673 		case VCHIQ_MSG_OPENACK:
1674 			if (size >= sizeof(struct vchiq_openack_payload)) {
1675 				const struct vchiq_openack_payload *payload =
1676 					(struct vchiq_openack_payload *)
1677 					header->data;
1678 				service->peer_version = payload->version;
1679 			}
1680 			vchiq_log_info(vchiq_core_log_level,
1681 				"%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
1682 				state->id, header, size, remoteport, localport,
1683 				service->peer_version);
1684 			if (service->srvstate ==
1685 				VCHIQ_SRVSTATE_OPENING) {
1686 				service->remoteport = remoteport;
1687 				vchiq_set_service_state(service,
1688 					VCHIQ_SRVSTATE_OPEN);
1689 				complete(&service->remove_event);
			} else {
				vchiq_log_error(vchiq_core_log_level,
					"OPENACK received in state %s",
					srvstate_names[service->srvstate]);
			}
1694 			break;
1695 		case VCHIQ_MSG_CLOSE:
1696 			WARN_ON(size != 0); /* There should be no data */
1697 
1698 			vchiq_log_info(vchiq_core_log_level,
1699 				"%d: prs CLOSE@%pK (%d->%d)",
1700 				state->id, header, remoteport, localport);
1701 
1702 			mark_service_closing_internal(service, 1);
1703 
1704 			if (vchiq_close_service_internal(service,
1705 				1/*close_recvd*/) == VCHIQ_RETRY)
1706 				goto bail_not_ready;
1707 
1708 			vchiq_log_info(vchiq_core_log_level,
1709 				"Close Service %c%c%c%c s:%u d:%d",
1710 				VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1711 				service->localport,
1712 				service->remoteport);
1713 			break;
1714 		case VCHIQ_MSG_DATA:
1715 			vchiq_log_info(vchiq_core_log_level,
1716 				"%d: prs DATA@%pK,%x (%d->%d)",
1717 				state->id, header, size, remoteport, localport);
1718 
1719 			if ((service->remoteport == remoteport)
1720 				&& (service->srvstate ==
1721 				VCHIQ_SRVSTATE_OPEN)) {
1722 				header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
1723 				claim_slot(state->rx_info);
1724 				DEBUG_TRACE(PARSE_LINE);
1725 				if (make_service_callback(service,
1726 					VCHIQ_MESSAGE_AVAILABLE, header,
1727 					NULL) == VCHIQ_RETRY) {
1728 					DEBUG_TRACE(PARSE_LINE);
1729 					goto bail_not_ready;
1730 				}
1731 				VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
1732 				VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
1733 					size);
1734 			} else {
1735 				VCHIQ_STATS_INC(state, error_count);
1736 			}
1737 			break;
1738 		case VCHIQ_MSG_CONNECT:
1739 			vchiq_log_info(vchiq_core_log_level,
1740 				"%d: prs CONNECT@%pK", state->id, header);
			state->version_common = ((struct vchiq_slot_zero *)
						 state->slot_data)->version;
1743 			complete(&state->connect);
1744 			break;
1745 		case VCHIQ_MSG_BULK_RX:
1746 		case VCHIQ_MSG_BULK_TX:
1747 			/*
1748 			 * We should never receive a bulk request from the
1749 			 * other side since we're not setup to perform as the
1750 			 * master.
1751 			 */
1752 			WARN_ON(1);
1753 			break;
1754 		case VCHIQ_MSG_BULK_RX_DONE:
1755 		case VCHIQ_MSG_BULK_TX_DONE:
1756 			if ((service->remoteport == remoteport)
1757 				&& (service->srvstate !=
1758 				VCHIQ_SRVSTATE_FREE)) {
1759 				struct vchiq_bulk_queue *queue;
1760 				struct vchiq_bulk *bulk;
1761 
1762 				queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
1763 					&service->bulk_rx : &service->bulk_tx;
1764 
1765 				DEBUG_TRACE(PARSE_LINE);
1766 				if (mutex_lock_killable(&service->bulk_mutex)) {
1767 					DEBUG_TRACE(PARSE_LINE);
1768 					goto bail_not_ready;
1769 				}
1770 				if ((int)(queue->remote_insert -
1771 					queue->local_insert) >= 0) {
1772 					vchiq_log_error(vchiq_core_log_level,
1773 						"%d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)",
1774 						state->id, msg_type_str(type),
1775 						header, remoteport, localport,
1776 						queue->remote_insert,
1777 						queue->local_insert);
1778 					mutex_unlock(&service->bulk_mutex);
1779 					break;
1780 				}
1781 				if (queue->process != queue->remote_insert) {
1782 					pr_err("%s: p %x != ri %x\n",
1783 					       __func__,
1784 					       queue->process,
1785 					       queue->remote_insert);
1786 					mutex_unlock(&service->bulk_mutex);
1787 					goto bail_not_ready;
1788 				}
1789 
1790 				bulk = &queue->bulks[
1791 					BULK_INDEX(queue->remote_insert)];
1792 				bulk->actual = *(int *)header->data;
1793 				queue->remote_insert++;
1794 
1795 				vchiq_log_info(vchiq_core_log_level,
1796 					"%d: prs %s@%pK (%d->%d) %x@%pad",
1797 					state->id, msg_type_str(type),
1798 					header, remoteport, localport,
1799 					bulk->actual, &bulk->data);
1800 
1801 				vchiq_log_trace(vchiq_core_log_level,
1802 					"%d: prs:%d %cx li=%x ri=%x p=%x",
1803 					state->id, localport,
1804 					(type == VCHIQ_MSG_BULK_RX_DONE) ?
1805 						'r' : 't',
1806 					queue->local_insert,
1807 					queue->remote_insert, queue->process);
1808 
1809 				DEBUG_TRACE(PARSE_LINE);
1810 				WARN_ON(queue->process == queue->local_insert);
1811 				vchiq_complete_bulk(bulk);
1812 				queue->process++;
1813 				mutex_unlock(&service->bulk_mutex);
1814 				DEBUG_TRACE(PARSE_LINE);
1815 				notify_bulks(service, queue, 1/*retry_poll*/);
1816 				DEBUG_TRACE(PARSE_LINE);
1817 			}
1818 			break;
1819 		case VCHIQ_MSG_PADDING:
1820 			vchiq_log_trace(vchiq_core_log_level,
1821 				"%d: prs PADDING@%pK,%x",
1822 				state->id, header, size);
1823 			break;
1824 		case VCHIQ_MSG_PAUSE:
1825 			/* If initiated, signal the application thread */
1826 			vchiq_log_trace(vchiq_core_log_level,
1827 				"%d: prs PAUSE@%pK,%x",
1828 				state->id, header, size);
1829 			if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
1830 				vchiq_log_error(vchiq_core_log_level,
1831 					"%d: PAUSE received in state PAUSED",
1832 					state->id);
1833 				break;
1834 			}
1835 			if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
1836 				/* Send a PAUSE in response */
1837 				if (queue_message(state, NULL,
1838 					VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
1839 					NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK)
1840 				    == VCHIQ_RETRY)
1841 					goto bail_not_ready;
1842 			}
1843 			/* At this point slot_mutex is held */
1844 			vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
1845 			break;
1846 		case VCHIQ_MSG_RESUME:
1847 			vchiq_log_trace(vchiq_core_log_level,
1848 				"%d: prs RESUME@%pK,%x",
1849 				state->id, header, size);
1850 			/* Release the slot mutex */
1851 			mutex_unlock(&state->slot_mutex);
1852 			vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1853 			break;
1854 
1855 		case VCHIQ_MSG_REMOTE_USE:
1856 			vchiq_on_remote_use(state);
1857 			break;
1858 		case VCHIQ_MSG_REMOTE_RELEASE:
1859 			vchiq_on_remote_release(state);
1860 			break;
1861 		case VCHIQ_MSG_REMOTE_USE_ACTIVE:
1862 			break;
1863 
1864 		default:
1865 			vchiq_log_error(vchiq_core_log_level,
1866 				"%d: prs invalid msgid %x@%pK,%x",
1867 				state->id, msgid, header, size);
1868 			WARN(1, "invalid message\n");
1869 			break;
1870 		}
1871 
1872 skip_message:
1873 		if (service) {
1874 			unlock_service(service);
1875 			service = NULL;
1876 		}
1877 
1878 		state->rx_pos += calc_stride(size);
1879 
1880 		DEBUG_TRACE(PARSE_LINE);
1881 		/*
1882 		 * Perform some housekeeping when the end of the slot is
1883 		 * reached.
1884 		 */
1885 		if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
1886 			/* Remove the extra reference count. */
1887 			release_slot(state, state->rx_info, NULL, NULL);
1888 			state->rx_data = NULL;
1889 		}
1890 	}
1891 
1892 bail_not_ready:
1893 	if (service)
1894 		unlock_service(service);
1895 }
1896 
1897 /* Called by the slot handler thread */
1898 static int
1899 slot_handler_func(void *v)
1900 {
1901 	struct vchiq_state *state = v;
1902 	struct vchiq_shared_state *local = state->local;
1903 
1904 	DEBUG_INITIALISE(local)
1905 
1906 	while (1) {
1907 		DEBUG_COUNT(SLOT_HANDLER_COUNT);
1908 		DEBUG_TRACE(SLOT_HANDLER_LINE);
1909 		remote_event_wait(&state->trigger_event, &local->trigger);
1910 
1911 		rmb();
1912 
1913 		DEBUG_TRACE(SLOT_HANDLER_LINE);
		if (state->poll_needed) {
			state->poll_needed = 0;
1917 
1918 			/*
1919 			 * Handle service polling and other rare conditions here
1920 			 * out of the mainline code
1921 			 */
1922 			switch (state->conn_state) {
1923 			case VCHIQ_CONNSTATE_CONNECTED:
1924 				/* Poll the services as requested */
1925 				poll_services(state);
1926 				break;
1927 
1928 			case VCHIQ_CONNSTATE_PAUSING:
1929 				if (queue_message(state, NULL,
1930 					VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
1931 					NULL, NULL, 0,
1932 					QMFLAGS_NO_MUTEX_UNLOCK)
1933 				    != VCHIQ_RETRY) {
1934 					vchiq_set_conn_state(state,
1935 						VCHIQ_CONNSTATE_PAUSE_SENT);
1936 				} else {
1937 					/* Retry later */
1938 					state->poll_needed = 1;
1939 				}
1940 				break;
1941 
1942 			case VCHIQ_CONNSTATE_RESUMING:
1943 				if (queue_message(state, NULL,
1944 					VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
1945 					NULL, NULL, 0, QMFLAGS_NO_MUTEX_LOCK)
1946 					!= VCHIQ_RETRY) {
1947 					vchiq_set_conn_state(state,
1948 						VCHIQ_CONNSTATE_CONNECTED);
1949 				} else {
1950 					/*
1951 					 * This should really be impossible,
1952 					 * since the PAUSE should have flushed
1953 					 * through outstanding messages.
1954 					 */
1955 					vchiq_log_error(vchiq_core_log_level,
1956 						"Failed to send RESUME message");
1957 				}
1958 				break;
1959 			default:
1960 				break;
1961 			}
1962 
1963 		}
1964 
1965 		DEBUG_TRACE(SLOT_HANDLER_LINE);
1966 		parse_rx_slots(state);
1967 	}
1968 	return 0;
1969 }
1970 
1971 /* Called by the recycle thread */
1972 static int
1973 recycle_func(void *v)
1974 {
1975 	struct vchiq_state *state = v;
1976 	struct vchiq_shared_state *local = state->local;
1977 	BITSET_T *found;
1978 	size_t length;
1979 
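	/*
	 * Per-service scratch bitmap handed to process_free_queue()
	 * while it walks the freed slots.
	 */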
1980 	length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
1981 
1982 	found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
1983 			      GFP_KERNEL);
1984 	if (!found)
1985 		return -ENOMEM;
1986 
1987 	while (1) {
1988 		remote_event_wait(&state->recycle_event, &local->recycle);
1989 
1990 		process_free_queue(state, found, length);
1991 	}
1992 	return 0;
1993 }
1994 
1995 /* Called by the sync thread */
1996 static int
1997 sync_func(void *v)
1998 {
1999 	struct vchiq_state *state = v;
2000 	struct vchiq_shared_state *local = state->local;
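	/*
	 * Synchronous messages always arrive in the remote's dedicated
	 * sync slot.
	 */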
2001 	struct vchiq_header *header =
2002 		(struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2003 			state->remote->slot_sync);
2004 
2005 	while (1) {
2006 		struct vchiq_service *service;
2007 		int msgid, size;
2008 		int type;
2009 		unsigned int localport, remoteport;
2010 
2011 		remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
2012 
2013 		rmb();
2014 
2015 		msgid = header->msgid;
2016 		size = header->size;
2017 		type = VCHIQ_MSG_TYPE(msgid);
2018 		localport = VCHIQ_MSG_DSTPORT(msgid);
2019 		remoteport = VCHIQ_MSG_SRCPORT(msgid);
2020 
2021 		service = find_service_by_port(state, localport);
2022 
2023 		if (!service) {
2024 			vchiq_log_error(vchiq_sync_log_level,
2025 				"%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
2026 				state->id, msg_type_str(type),
2027 				header, remoteport, localport, localport);
2028 			release_message_sync(state, header);
2029 			continue;
2030 		}
2031 
2032 		if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
2033 			int svc_fourcc;
2034 
2035 			svc_fourcc = service
2036 				? service->base.fourcc
2037 				: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
2038 			vchiq_log_trace(vchiq_sync_log_level,
2039 				"Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
2040 				msg_type_str(type),
2041 				VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
2042 				remoteport, localport, size);
2043 			if (size > 0)
2044 				vchiq_log_dump_mem("Rcvd", 0, header->data,
2045 					min(16, size));
2046 		}
2047 
2048 		switch (type) {
2049 		case VCHIQ_MSG_OPENACK:
2050 			if (size >= sizeof(struct vchiq_openack_payload)) {
2051 				const struct vchiq_openack_payload *payload =
2052 					(struct vchiq_openack_payload *)
2053 					header->data;
2054 				service->peer_version = payload->version;
2055 			}
2056 			vchiq_log_info(vchiq_sync_log_level,
2057 				"%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
2058 				state->id, header, size, remoteport, localport,
2059 				service->peer_version);
2060 			if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
2061 				service->remoteport = remoteport;
2062 				vchiq_set_service_state(service,
2063 					VCHIQ_SRVSTATE_OPENSYNC);
2064 				service->sync = 1;
2065 				complete(&service->remove_event);
2066 			}
2067 			release_message_sync(state, header);
2068 			break;
2069 
2070 		case VCHIQ_MSG_DATA:
2071 			vchiq_log_trace(vchiq_sync_log_level,
2072 				"%d: sf DATA@%pK,%x (%d->%d)",
2073 				state->id, header, size, remoteport, localport);
2074 
2075 			if ((service->remoteport == remoteport) &&
2076 				(service->srvstate ==
2077 				VCHIQ_SRVSTATE_OPENSYNC)) {
2078 				if (make_service_callback(service,
2079 					VCHIQ_MESSAGE_AVAILABLE, header,
2080 					NULL) == VCHIQ_RETRY)
2081 					vchiq_log_error(vchiq_sync_log_level,
2082 						"synchronous callback to service %d returns VCHIQ_RETRY",
2083 						localport);
2084 			}
2085 			break;
2086 
2087 		default:
2088 			vchiq_log_error(vchiq_sync_log_level,
2089 				"%d: sf unexpected msgid %x@%pK,%x",
2090 				state->id, msgid, header, size);
2091 			release_message_sync(state, header);
2092 			break;
2093 		}
2094 
2095 		unlock_service(service);
2096 	}
2097 
2098 	return 0;
2099 }
2100 
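/* Reset a bulk queue so that it holds no outstanding transfers */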
2101 static void
2102 init_bulk_queue(struct vchiq_bulk_queue *queue)
2103 {
2104 	queue->local_insert = 0;
2105 	queue->remote_insert = 0;
2106 	queue->process = 0;
2107 	queue->remote_notify = 0;
2108 	queue->remove = 0;
2109 }
2110 
2111 inline const char *
2112 get_conn_state_name(enum vchiq_connstate conn_state)
2113 {
2114 	return conn_state_names[conn_state];
2115 }
2116 
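/*
 * Carve the shared memory region into VCHIQ_SLOT_SIZE slots: slot zero
 * occupies the first aligned slots and the remaining slots are split
 * evenly between the master and slave sides, each getting a synchronous
 * slot followed by its data slots. Returns NULL if the region is too
 * small for a minimal system.
 */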
2117 struct vchiq_slot_zero *
2118 vchiq_init_slots(void *mem_base, int mem_size)
2119 {
2120 	int mem_align =
2121 		(int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
2122 	struct vchiq_slot_zero *slot_zero =
2123 		(struct vchiq_slot_zero *)(mem_base + mem_align);
2124 	int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
2125 	int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
2126 
	/* Ensure there is enough memory to run an absolutely minimal system */
2128 	num_slots -= first_data_slot;
2129 
2130 	if (num_slots < 4) {
2131 		vchiq_log_error(vchiq_core_log_level,
2132 			"%s - insufficient memory %x bytes",
2133 			__func__, mem_size);
2134 		return NULL;
2135 	}
2136 
2137 	memset(slot_zero, 0, sizeof(struct vchiq_slot_zero));
2138 
2139 	slot_zero->magic = VCHIQ_MAGIC;
2140 	slot_zero->version = VCHIQ_VERSION;
2141 	slot_zero->version_min = VCHIQ_VERSION_MIN;
2142 	slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
2143 	slot_zero->slot_size = VCHIQ_SLOT_SIZE;
2144 	slot_zero->max_slots = VCHIQ_MAX_SLOTS;
2145 	slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
2146 
2147 	slot_zero->master.slot_sync = first_data_slot;
2148 	slot_zero->master.slot_first = first_data_slot + 1;
2149 	slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
2150 	slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
2151 	slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
2152 	slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
2153 
2154 	return slot_zero;
2155 }
2156 
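/*
 * Set up a vchiq_state around an initialised slot_zero block: wire up
 * the local (slave) and remote (master) shared state, create the
 * events, mutexes and worker threads, and finally mark the local side
 * as initialised so that the remote can proceed.
 */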
2157 enum vchiq_status
2158 vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
2159 {
2160 	struct vchiq_shared_state *local;
2161 	struct vchiq_shared_state *remote;
2162 	enum vchiq_status status;
2163 	char threadname[16];
2164 	int i;
2165 
2166 	if (vchiq_states[0]) {
2167 		pr_err("%s: VCHIQ state already initialized\n", __func__);
2168 		return VCHIQ_ERROR;
2169 	}
2170 
2171 	local = &slot_zero->slave;
2172 	remote = &slot_zero->master;
2173 
2174 	if (local->initialised) {
2175 		vchiq_loud_error_header();
2176 		if (remote->initialised)
2177 			vchiq_loud_error("local state has already been initialised");
2178 		else
			vchiq_loud_error("master/slave mismatch - two slaves");
2180 		vchiq_loud_error_footer();
2181 		return VCHIQ_ERROR;
2182 	}
2183 
2184 	memset(state, 0, sizeof(struct vchiq_state));
2185 
2186 	/*
2187 	 * initialize shared state pointers
2188 	 */
2189 
2190 	state->local = local;
2191 	state->remote = remote;
2192 	state->slot_data = (struct vchiq_slot *)slot_zero;
2193 
2194 	/*
2195 	 * initialize events and mutexes
2196 	 */
2197 
2198 	init_completion(&state->connect);
2199 	mutex_init(&state->mutex);
2200 	mutex_init(&state->slot_mutex);
2201 	mutex_init(&state->recycle_mutex);
2202 	mutex_init(&state->sync_mutex);
2203 	mutex_init(&state->bulk_transfer_mutex);
2204 
2205 	init_completion(&state->slot_available_event);
2206 	init_completion(&state->slot_remove_event);
2207 	init_completion(&state->data_quota_event);
2208 
2209 	state->slot_queue_available = 0;
2210 
2211 	for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
2212 		struct vchiq_service_quota *service_quota =
2213 			&state->service_quotas[i];
2214 		init_completion(&service_quota->quota_event);
2215 	}
2216 
2217 	for (i = local->slot_first; i <= local->slot_last; i++) {
2218 		local->slot_queue[state->slot_queue_available++] = i;
2219 		complete(&state->slot_available_event);
2220 	}
2221 
2222 	state->default_slot_quota = state->slot_queue_available/2;
2223 	state->default_message_quota =
2224 		min((unsigned short)(state->default_slot_quota * 256),
2225 		(unsigned short)~0);
2226 
2227 	state->previous_data_index = -1;
2228 	state->data_use_count = 0;
2229 	state->data_quota = state->slot_queue_available - 1;
2230 
2231 	remote_event_create(&state->trigger_event, &local->trigger);
2232 	local->tx_pos = 0;
2233 	remote_event_create(&state->recycle_event, &local->recycle);
2234 	local->slot_queue_recycle = state->slot_queue_available;
2235 	remote_event_create(&state->sync_trigger_event, &local->sync_trigger);
2236 	remote_event_create(&state->sync_release_event, &local->sync_release);
2237 
2238 	/* At start-of-day, the slot is empty and available */
2239 	((struct vchiq_header *)
2240 		SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid =
2241 							VCHIQ_MSGID_PADDING;
2242 	remote_event_signal_local(&state->sync_release_event, &local->sync_release);
2243 
2244 	local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
2245 
2246 	status = vchiq_platform_init_state(state);
2247 	if (status != VCHIQ_SUCCESS)
2248 		return VCHIQ_ERROR;
2249 
2250 	/*
2251 	 * bring up slot handler thread
2252 	 */
2253 	snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id);
2254 	state->slot_handler_thread = kthread_create(&slot_handler_func,
2255 		(void *)state,
2256 		threadname);
2257 
2258 	if (IS_ERR(state->slot_handler_thread)) {
2259 		vchiq_loud_error_header();
2260 		vchiq_loud_error("couldn't create thread %s", threadname);
2261 		vchiq_loud_error_footer();
2262 		return VCHIQ_ERROR;
2263 	}
2264 	set_user_nice(state->slot_handler_thread, -19);
2265 
2266 	snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
2267 	state->recycle_thread = kthread_create(&recycle_func,
2268 		(void *)state,
2269 		threadname);
2270 	if (IS_ERR(state->recycle_thread)) {
2271 		vchiq_loud_error_header();
2272 		vchiq_loud_error("couldn't create thread %s", threadname);
2273 		vchiq_loud_error_footer();
2274 		goto fail_free_handler_thread;
2275 	}
2276 	set_user_nice(state->recycle_thread, -19);
2277 
2278 	snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
2279 	state->sync_thread = kthread_create(&sync_func,
2280 		(void *)state,
2281 		threadname);
2282 	if (IS_ERR(state->sync_thread)) {
2283 		vchiq_loud_error_header();
2284 		vchiq_loud_error("couldn't create thread %s", threadname);
2285 		vchiq_loud_error_footer();
2286 		goto fail_free_recycle_thread;
2287 	}
2288 	set_user_nice(state->sync_thread, -20);
2289 
2290 	wake_up_process(state->slot_handler_thread);
2291 	wake_up_process(state->recycle_thread);
2292 	wake_up_process(state->sync_thread);
2293 
2294 	vchiq_states[0] = state;
2295 
2296 	/* Indicate readiness to the other side */
2297 	local->initialised = 1;
2298 
2299 	return status;
2300 
2301 fail_free_recycle_thread:
2302 	kthread_stop(state->recycle_thread);
2303 fail_free_handler_thread:
2304 	kthread_stop(state->slot_handler_thread);
2305 
2306 	return VCHIQ_ERROR;
2307 }
2308 
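/*
 * Append a held message header to the service's message queue,
 * blocking while the queue is full.
 */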
2309 void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header)
2310 {
2311 	struct vchiq_service *service = find_service_by_handle(handle);
	int pos;

	if (!service)
		return;

2314 	while (service->msg_queue_write == service->msg_queue_read +
2315 		VCHIQ_MAX_SLOTS) {
2316 		if (wait_for_completion_interruptible(&service->msg_queue_pop))
2317 			flush_signals(current);
2318 	}
2319 
2320 	pos = service->msg_queue_write++ & (VCHIQ_MAX_SLOTS - 1);
2321 	service->msg_queue[pos] = header;
2322 
2323 	complete(&service->msg_queue_push);
2324 }
2325 EXPORT_SYMBOL(vchiq_msg_queue_push);
2326 
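/*
 * Remove the oldest held message from the service's message queue, or
 * return NULL if the queue is empty; a writer blocked in
 * vchiq_msg_queue_push() is woken once the entry has been consumed.
 */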
2327 struct vchiq_header *vchiq_msg_hold(unsigned int handle)
2328 {
2329 	struct vchiq_service *service = find_service_by_handle(handle);
2330 	struct vchiq_header *header;
	int pos;

	if (!service)
		return NULL;

2333 	if (service->msg_queue_write == service->msg_queue_read)
2334 		return NULL;
2335 
2336 	while (service->msg_queue_write == service->msg_queue_read) {
2337 		if (wait_for_completion_interruptible(&service->msg_queue_push))
2338 			flush_signals(current);
2339 	}
2340 
2341 	pos = service->msg_queue_read++ & (VCHIQ_MAX_SLOTS - 1);
2342 	header = service->msg_queue[pos];
2343 
2344 	complete(&service->msg_queue_pop);
2345 
2346 	return header;
2347 }
2348 EXPORT_SYMBOL(vchiq_msg_hold);
2349 
2350 static int vchiq_validate_params(const struct vchiq_service_params_kernel *params)
2351 {
2352 	if (!params->callback || !params->fourcc) {
2353 		vchiq_loud_error("Can't add service, invalid params\n");
2354 		return -EINVAL;
2355 	}
2356 
2357 	return 0;
2358 }
2359 
2360 /* Called from application thread when a client or server service is created. */
2361 struct vchiq_service *
2362 vchiq_add_service_internal(struct vchiq_state *state,
2363 			   const struct vchiq_service_params_kernel *params,
2364 			   int srvstate, struct vchiq_instance *instance,
2365 			   vchiq_userdata_term userdata_term)
2366 {
2367 	struct vchiq_service *service;
2368 	struct vchiq_service __rcu **pservice = NULL;
2369 	struct vchiq_service_quota *service_quota;
2370 	int ret;
2371 	int i;
2372 
2373 	ret = vchiq_validate_params(params);
2374 	if (ret)
2375 		return NULL;
2376 
2377 	service = kmalloc(sizeof(*service), GFP_KERNEL);
2378 	if (!service)
2379 		return service;
2380 
2381 	service->base.fourcc   = params->fourcc;
2382 	service->base.callback = params->callback;
2383 	service->base.userdata = params->userdata;
2384 	service->handle        = VCHIQ_SERVICE_HANDLE_INVALID;
2385 	kref_init(&service->ref_count);
2386 	service->srvstate      = VCHIQ_SRVSTATE_FREE;
2387 	service->userdata_term = userdata_term;
2388 	service->localport     = VCHIQ_PORT_FREE;
2389 	service->remoteport    = VCHIQ_PORT_FREE;
2390 
2391 	service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
2392 		VCHIQ_FOURCC_INVALID : params->fourcc;
2393 	service->client_id     = 0;
2394 	service->auto_close    = 1;
2395 	service->sync          = 0;
2396 	service->closing       = 0;
2397 	service->trace         = 0;
2398 	atomic_set(&service->poll_flags, 0);
2399 	service->version       = params->version;
2400 	service->version_min   = params->version_min;
2401 	service->state         = state;
2402 	service->instance      = instance;
2403 	service->service_use_count = 0;
2404 	service->msg_queue_read = 0;
2405 	service->msg_queue_write = 0;
2406 	init_bulk_queue(&service->bulk_tx);
2407 	init_bulk_queue(&service->bulk_rx);
2408 	init_completion(&service->remove_event);
2409 	init_completion(&service->bulk_remove_event);
2410 	init_completion(&service->msg_queue_pop);
2411 	init_completion(&service->msg_queue_push);
2412 	mutex_init(&service->bulk_mutex);
2413 	memset(&service->stats, 0, sizeof(service->stats));
2414 	memset(&service->msg_queue, 0, sizeof(service->msg_queue));
2415 
2416 	/*
2417 	 * Although it is perfectly possible to use a spinlock
2418 	 * to protect the creation of services, it is overkill as it
2419 	 * disables interrupts while the array is searched.
2420 	 * The only danger is of another thread trying to create a
2421 	 * service - service deletion is safe.
2422 	 * Therefore it is preferable to use state->mutex which,
2423 	 * although slower to claim, doesn't block interrupts while
2424 	 * it is held.
2425 	 */
2426 
2427 	mutex_lock(&state->mutex);
2428 
2429 	/* Prepare to use a previously unused service */
2430 	if (state->unused_service < VCHIQ_MAX_SERVICES)
2431 		pservice = &state->services[state->unused_service];
2432 
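	/*
	 * Clients take the first free entry. Servers scan the existing
	 * entries, remembering a free one but rejecting the request if
	 * another server is already using this fourcc with a different
	 * instance or callback.
	 */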
2433 	if (srvstate == VCHIQ_SRVSTATE_OPENING) {
2434 		for (i = 0; i < state->unused_service; i++) {
2435 			if (!rcu_access_pointer(state->services[i])) {
2436 				pservice = &state->services[i];
2437 				break;
2438 			}
2439 		}
2440 	} else {
2441 		rcu_read_lock();
2442 		for (i = (state->unused_service - 1); i >= 0; i--) {
2443 			struct vchiq_service *srv;
2444 
2445 			srv = rcu_dereference(state->services[i]);
2446 			if (!srv)
2447 				pservice = &state->services[i];
2448 			else if ((srv->public_fourcc == params->fourcc)
2449 				&& ((srv->instance != instance) ||
2450 				(srv->base.callback !=
2451 				params->callback))) {
2452 				/*
2453 				 * There is another server using this
2454 				 * fourcc which doesn't match.
2455 				 */
2456 				pservice = NULL;
2457 				break;
2458 			}
2459 		}
2460 		rcu_read_unlock();
2461 	}
2462 
2463 	if (pservice) {
2464 		service->localport = (pservice - state->services);
2465 		if (!handle_seq)
2466 			handle_seq = VCHIQ_MAX_STATES *
2467 				 VCHIQ_MAX_SERVICES;
2468 		service->handle = handle_seq |
2469 			(state->id * VCHIQ_MAX_SERVICES) |
2470 			service->localport;
2471 		handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
2472 		rcu_assign_pointer(*pservice, service);
2473 		if (pservice == &state->services[state->unused_service])
2474 			state->unused_service++;
2475 	}
2476 
2477 	mutex_unlock(&state->mutex);
2478 
2479 	if (!pservice) {
2480 		kfree(service);
2481 		return NULL;
2482 	}
2483 
2484 	service_quota = &state->service_quotas[service->localport];
2485 	service_quota->slot_quota = state->default_slot_quota;
2486 	service_quota->message_quota = state->default_message_quota;
2487 	if (service_quota->slot_use_count == 0)
2488 		service_quota->previous_tx_index =
2489 			SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
2490 			- 1;
2491 
2492 	/* Bring this service online */
2493 	vchiq_set_service_state(service, srvstate);
2494 
2495 	vchiq_log_info(vchiq_core_msg_log_level,
2496 		"%s Service %c%c%c%c SrcPort:%d",
2497 		(srvstate == VCHIQ_SRVSTATE_OPENING)
2498 		? "Open" : "Add",
2499 		VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
2500 		service->localport);
2501 
2502 	/* Don't unlock the service - leave it with a ref_count of 1. */
2503 
2504 	return service;
2505 }
2506 
2507 enum vchiq_status
2508 vchiq_open_service_internal(struct vchiq_service *service, int client_id)
2509 {
2510 	struct vchiq_open_payload payload = {
2511 		service->base.fourcc,
2512 		client_id,
2513 		service->version,
2514 		service->version_min
2515 	};
2516 	enum vchiq_status status = VCHIQ_SUCCESS;
2517 
2518 	service->client_id = client_id;
2519 	vchiq_use_service_internal(service);
2520 	status = queue_message(service->state,
2521 			       NULL,
2522 			       VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN,
2523 					      service->localport,
2524 					      0),
2525 			       memcpy_copy_callback,
2526 			       &payload,
2527 			       sizeof(payload),
2528 			       QMFLAGS_IS_BLOCKING);
2529 	if (status == VCHIQ_SUCCESS) {
2530 		/* Wait for the ACK/NAK */
2531 		if (wait_for_completion_interruptible(&service->remove_event)) {
2532 			status = VCHIQ_RETRY;
2533 			vchiq_release_service_internal(service);
2534 		} else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
2535 			   (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
2536 			if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
2537 				vchiq_log_error(vchiq_core_log_level,
2538 						"%d: osi - srvstate = %s (ref %u)",
2539 						service->state->id,
2540 						srvstate_names[service->srvstate],
2541 						kref_read(&service->ref_count));
2542 			status = VCHIQ_ERROR;
2543 			VCHIQ_SERVICE_STATS_INC(service, error_count);
2544 			vchiq_release_service_internal(service);
2545 		}
2546 	}
2547 	return status;
2548 }
2549 
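/*
 * Release any received messages still claimed on behalf of a closing
 * service so that their slots can be recycled.
 */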
2550 static void
2551 release_service_messages(struct vchiq_service *service)
2552 {
2553 	struct vchiq_state *state = service->state;
2554 	int slot_last = state->remote->slot_last;
2555 	int i;
2556 
2557 	/* Release any claimed messages aimed at this service */
2558 
2559 	if (service->sync) {
2560 		struct vchiq_header *header =
2561 			(struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2562 						state->remote->slot_sync);
2563 		if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
2564 			release_message_sync(state, header);
2565 
2566 		return;
2567 	}
2568 
2569 	for (i = state->remote->slot_first; i <= slot_last; i++) {
2570 		struct vchiq_slot_info *slot_info =
2571 			SLOT_INFO_FROM_INDEX(state, i);
2572 		if (slot_info->release_count != slot_info->use_count) {
2573 			char *data =
2574 				(char *)SLOT_DATA_FROM_INDEX(state, i);
2575 			unsigned int pos, end;
2576 
2577 			end = VCHIQ_SLOT_SIZE;
2578 			if (data == state->rx_data)
2579 				/*
2580 				 * This buffer is still being read from - stop
2581 				 * at the current read position
2582 				 */
2583 				end = state->rx_pos & VCHIQ_SLOT_MASK;
2584 
2585 			pos = 0;
2586 
2587 			while (pos < end) {
2588 				struct vchiq_header *header =
2589 					(struct vchiq_header *)(data + pos);
2590 				int msgid = header->msgid;
2591 				int port = VCHIQ_MSG_DSTPORT(msgid);
2592 
2593 				if ((port == service->localport) &&
2594 					(msgid & VCHIQ_MSGID_CLAIMED)) {
2595 					vchiq_log_info(vchiq_core_log_level,
2596 						"  fsi - hdr %pK", header);
2597 					release_slot(state, slot_info, header,
2598 						NULL);
2599 				}
2600 				pos += calc_stride(header->size);
2601 				if (pos > VCHIQ_SLOT_SIZE) {
2602 					vchiq_log_error(vchiq_core_log_level,
2603 						"fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
2604 						pos, header, msgid,
2605 						header->msgid, header->size);
2606 					WARN(1, "invalid slot position\n");
2607 				}
2608 			}
2609 		}
2610 	}
2611 }
2612 
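/*
 * Abort all outstanding bulk transfers on a service and deliver their
 * completion notifications. Returns nonzero on success, zero if the
 * bulk mutex could not be taken or a notification must be retried.
 */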
2613 static int
2614 do_abort_bulks(struct vchiq_service *service)
2615 {
2616 	enum vchiq_status status;
2617 
2618 	/* Abort any outstanding bulk transfers */
2619 	if (mutex_lock_killable(&service->bulk_mutex))
2620 		return 0;
2621 	abort_outstanding_bulks(service, &service->bulk_tx);
2622 	abort_outstanding_bulks(service, &service->bulk_rx);
2623 	mutex_unlock(&service->bulk_mutex);
2624 
2625 	status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
2626 	if (status == VCHIQ_SUCCESS)
2627 		status = notify_bulks(service, &service->bulk_rx,
2628 			0/*!retry_poll*/);
2629 	return (status == VCHIQ_SUCCESS);
2630 }
2631 
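/*
 * Final stage of closing a service: move it to its post-close state,
 * issue the SERVICE_CLOSED callback, drop any remaining use counts and
 * either free the service or wake anyone waiting on remove_event. If
 * the callback asks for a retry the service is parked in 'failstate'.
 */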
2632 static enum vchiq_status
2633 close_service_complete(struct vchiq_service *service, int failstate)
2634 {
2635 	enum vchiq_status status;
2636 	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2637 	int newstate;
2638 
2639 	switch (service->srvstate) {
2640 	case VCHIQ_SRVSTATE_OPEN:
2641 	case VCHIQ_SRVSTATE_CLOSESENT:
2642 	case VCHIQ_SRVSTATE_CLOSERECVD:
2643 		if (is_server) {
2644 			if (service->auto_close) {
2645 				service->client_id = 0;
2646 				service->remoteport = VCHIQ_PORT_FREE;
2647 				newstate = VCHIQ_SRVSTATE_LISTENING;
2648 			} else
2649 				newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
2650 		} else
2651 			newstate = VCHIQ_SRVSTATE_CLOSED;
2652 		vchiq_set_service_state(service, newstate);
2653 		break;
2654 	case VCHIQ_SRVSTATE_LISTENING:
2655 		break;
2656 	default:
2657 		vchiq_log_error(vchiq_core_log_level,
2658 			"%s(%x) called in state %s", __func__,
2659 			service->handle, srvstate_names[service->srvstate]);
2660 		WARN(1, "%s in unexpected state\n", __func__);
2661 		return VCHIQ_ERROR;
2662 	}
2663 
2664 	status = make_service_callback(service,
2665 		VCHIQ_SERVICE_CLOSED, NULL, NULL);
2666 
2667 	if (status != VCHIQ_RETRY) {
2668 		int uc = service->service_use_count;
2669 		int i;
2670 		/* Complete the close process */
2671 		for (i = 0; i < uc; i++)
2672 			/*
			 * cater for cases where close is forced and the
			 * client may not close all its handles
2675 			 */
2676 			vchiq_release_service_internal(service);
2677 
2678 		service->client_id = 0;
2679 		service->remoteport = VCHIQ_PORT_FREE;
2680 
2681 		if (service->srvstate == VCHIQ_SRVSTATE_CLOSED)
2682 			vchiq_free_service_internal(service);
2683 		else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
2684 			if (is_server)
2685 				service->closing = 0;
2686 
2687 			complete(&service->remove_event);
2688 		}
2689 	} else
2690 		vchiq_set_service_state(service, failstate);
2691 
2692 	return status;
2693 }
2694 
2695 /* Called by the slot handler */
2696 enum vchiq_status
2697 vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
2698 {
2699 	struct vchiq_state *state = service->state;
2700 	enum vchiq_status status = VCHIQ_SUCCESS;
2701 	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2702 
2703 	vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
2704 		service->state->id, service->localport, close_recvd,
2705 		srvstate_names[service->srvstate]);
2706 
2707 	switch (service->srvstate) {
2708 	case VCHIQ_SRVSTATE_CLOSED:
2709 	case VCHIQ_SRVSTATE_HIDDEN:
2710 	case VCHIQ_SRVSTATE_LISTENING:
2711 	case VCHIQ_SRVSTATE_CLOSEWAIT:
2712 		if (close_recvd)
2713 			vchiq_log_error(vchiq_core_log_level,
2714 				"%s(1) called in state %s",
2715 				__func__, srvstate_names[service->srvstate]);
2716 		else if (is_server) {
2717 			if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
2718 				status = VCHIQ_ERROR;
2719 			} else {
2720 				service->client_id = 0;
2721 				service->remoteport = VCHIQ_PORT_FREE;
2722 				if (service->srvstate ==
2723 					VCHIQ_SRVSTATE_CLOSEWAIT)
2724 					vchiq_set_service_state(service,
2725 						VCHIQ_SRVSTATE_LISTENING);
2726 			}
2727 			complete(&service->remove_event);
2728 		} else
2729 			vchiq_free_service_internal(service);
2730 		break;
2731 	case VCHIQ_SRVSTATE_OPENING:
2732 		if (close_recvd) {
2733 			/* The open was rejected - tell the user */
2734 			vchiq_set_service_state(service,
2735 				VCHIQ_SRVSTATE_CLOSEWAIT);
2736 			complete(&service->remove_event);
2737 		} else {
2738 			/* Shutdown mid-open - let the other side know */
2739 			status = queue_message(state, service,
2740 				VCHIQ_MAKE_MSG
2741 				(VCHIQ_MSG_CLOSE,
2742 				service->localport,
2743 				VCHIQ_MSG_DSTPORT(service->remoteport)),
2744 				NULL, NULL, 0, 0);
2745 		}
2746 		break;
2747 
2748 	case VCHIQ_SRVSTATE_OPENSYNC:
2749 		mutex_lock(&state->sync_mutex);
2750 		fallthrough;
2751 	case VCHIQ_SRVSTATE_OPEN:
2752 		if (close_recvd) {
2753 			if (!do_abort_bulks(service))
2754 				status = VCHIQ_RETRY;
2755 		}
2756 
2757 		release_service_messages(service);
2758 
2759 		if (status == VCHIQ_SUCCESS)
2760 			status = queue_message(state, service,
2761 				VCHIQ_MAKE_MSG
2762 				(VCHIQ_MSG_CLOSE,
2763 				service->localport,
2764 				VCHIQ_MSG_DSTPORT(service->remoteport)),
2765 				NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
2766 
2767 		if (status == VCHIQ_SUCCESS) {
2768 			if (!close_recvd) {
2769 				/* Change the state while the mutex is still held */
2770 				vchiq_set_service_state(service,
2771 							VCHIQ_SRVSTATE_CLOSESENT);
2772 				mutex_unlock(&state->slot_mutex);
2773 				if (service->sync)
2774 					mutex_unlock(&state->sync_mutex);
2775 				break;
2776 			}
2777 		} else if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC) {
2778 			mutex_unlock(&state->sync_mutex);
2779 			break;
2780 		} else
2781 			break;
2782 
2783 		/* Change the state while the mutex is still held */
2784 		vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
2785 		mutex_unlock(&state->slot_mutex);
2786 		if (service->sync)
2787 			mutex_unlock(&state->sync_mutex);
2788 
2789 		status = close_service_complete(service,
2790 				VCHIQ_SRVSTATE_CLOSERECVD);
2791 		break;
2792 
2793 	case VCHIQ_SRVSTATE_CLOSESENT:
2794 		if (!close_recvd)
2795 			/* This happens when a process is killed mid-close */
2796 			break;
2797 
2798 		if (!do_abort_bulks(service)) {
2799 			status = VCHIQ_RETRY;
2800 			break;
2801 		}
2802 
2803 		if (status == VCHIQ_SUCCESS)
2804 			status = close_service_complete(service,
2805 				VCHIQ_SRVSTATE_CLOSERECVD);
2806 		break;
2807 
2808 	case VCHIQ_SRVSTATE_CLOSERECVD:
2809 		if (!close_recvd && is_server)
2810 			/* Force into LISTENING mode */
2811 			vchiq_set_service_state(service,
2812 				VCHIQ_SRVSTATE_LISTENING);
2813 		status = close_service_complete(service,
2814 			VCHIQ_SRVSTATE_CLOSERECVD);
2815 		break;
2816 
2817 	default:
2818 		vchiq_log_error(vchiq_core_log_level,
2819 			"%s(%d) called in state %s", __func__,
2820 			close_recvd, srvstate_names[service->srvstate]);
2821 		break;
2822 	}
2823 
2824 	return status;
2825 }
2826 
2827 /* Called from the application process upon process death */
2828 void
2829 vchiq_terminate_service_internal(struct vchiq_service *service)
2830 {
2831 	struct vchiq_state *state = service->state;
2832 
2833 	vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
2834 		state->id, service->localport, service->remoteport);
2835 
2836 	mark_service_closing(service);
2837 
2838 	/* Mark the service for removal by the slot handler */
2839 	request_poll(state, service, VCHIQ_POLL_REMOVE);
2840 }
2841 
2842 /* Called from the slot handler */
2843 void
2844 vchiq_free_service_internal(struct vchiq_service *service)
2845 {
2846 	struct vchiq_state *state = service->state;
2847 
2848 	vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
2849 		state->id, service->localport);
2850 
2851 	switch (service->srvstate) {
2852 	case VCHIQ_SRVSTATE_OPENING:
2853 	case VCHIQ_SRVSTATE_CLOSED:
2854 	case VCHIQ_SRVSTATE_HIDDEN:
2855 	case VCHIQ_SRVSTATE_LISTENING:
2856 	case VCHIQ_SRVSTATE_CLOSEWAIT:
2857 		break;
2858 	default:
2859 		vchiq_log_error(vchiq_core_log_level,
2860 			"%d: fsi - (%d) in state %s",
2861 			state->id, service->localport,
2862 			srvstate_names[service->srvstate]);
2863 		return;
2864 	}
2865 
2866 	vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
2867 
2868 	complete(&service->remove_event);
2869 
2870 	/* Release the initial lock */
2871 	unlock_service(service);
2872 }
2873 
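/*
 * Move every HIDDEN service belonging to this instance to LISTENING,
 * then perform the CONNECT handshake with the remote side if this is
 * the first connection attempt.
 */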
2874 enum vchiq_status
2875 vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2876 {
2877 	struct vchiq_service *service;
2878 	int i;
2879 
2880 	/* Find all services registered to this client and enable them. */
2881 	i = 0;
2882 	while ((service = next_service_by_instance(state, instance,
2883 		&i)) !=	NULL) {
2884 		if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
2885 			vchiq_set_service_state(service,
2886 				VCHIQ_SRVSTATE_LISTENING);
2887 		unlock_service(service);
2888 	}
2889 
2890 	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
2891 		if (queue_message(state, NULL,
2892 			VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, NULL,
2893 			0, QMFLAGS_IS_BLOCKING) == VCHIQ_RETRY)
2894 			return VCHIQ_RETRY;
2895 
2896 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
2897 	}
2898 
2899 	if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
2900 		if (wait_for_completion_interruptible(&state->connect))
2901 			return VCHIQ_RETRY;
2902 
2903 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
2904 		complete(&state->connect);
2905 	}
2906 
2907 	return VCHIQ_SUCCESS;
2908 }
2909 
2910 enum vchiq_status
2911 vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2912 {
2913 	struct vchiq_service *service;
2914 	int i;
2915 
	/* Find all services registered to this instance and remove them. */
2917 	i = 0;
2918 	while ((service = next_service_by_instance(state, instance,
2919 		&i)) !=	NULL) {
2920 		(void)vchiq_remove_service(service->handle);
2921 		unlock_service(service);
2922 	}
2923 
2924 	return VCHIQ_SUCCESS;
2925 }
2926 
2927 enum vchiq_status
2928 vchiq_close_service(unsigned int handle)
2929 {
2930 	/* Unregister the service */
2931 	struct vchiq_service *service = find_service_by_handle(handle);
2932 	enum vchiq_status status = VCHIQ_SUCCESS;
2933 
2934 	if (!service)
2935 		return VCHIQ_ERROR;
2936 
2937 	vchiq_log_info(vchiq_core_log_level,
2938 		"%d: close_service:%d",
2939 		service->state->id, service->localport);
2940 
2941 	if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2942 		(service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2943 		(service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
2944 		unlock_service(service);
2945 		return VCHIQ_ERROR;
2946 	}
2947 
2948 	mark_service_closing(service);
2949 
2950 	if (current == service->state->slot_handler_thread) {
2951 		status = vchiq_close_service_internal(service,
2952 			0/*!close_recvd*/);
2953 		WARN_ON(status == VCHIQ_RETRY);
2954 	} else {
		/* Mark the service for termination by the slot handler */
2956 		request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
2957 	}
2958 
2959 	while (1) {
2960 		if (wait_for_completion_interruptible(&service->remove_event)) {
2961 			status = VCHIQ_RETRY;
2962 			break;
2963 		}
2964 
2965 		if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2966 			(service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2967 			(service->srvstate == VCHIQ_SRVSTATE_OPEN))
2968 			break;
2969 
2970 		vchiq_log_warning(vchiq_core_log_level,
2971 			"%d: close_service:%d - waiting in state %s",
2972 			service->state->id, service->localport,
2973 			srvstate_names[service->srvstate]);
2974 	}
2975 
2976 	if ((status == VCHIQ_SUCCESS) &&
2977 		(service->srvstate != VCHIQ_SRVSTATE_FREE) &&
2978 		(service->srvstate != VCHIQ_SRVSTATE_LISTENING))
2979 		status = VCHIQ_ERROR;
2980 
2981 	unlock_service(service);
2982 
2983 	return status;
2984 }
2985 EXPORT_SYMBOL(vchiq_close_service);
2986 
2987 enum vchiq_status
2988 vchiq_remove_service(unsigned int handle)
2989 {
2990 	/* Unregister the service */
2991 	struct vchiq_service *service = find_service_by_handle(handle);
2992 	enum vchiq_status status = VCHIQ_SUCCESS;
2993 
2994 	if (!service)
2995 		return VCHIQ_ERROR;
2996 
2997 	vchiq_log_info(vchiq_core_log_level,
2998 		"%d: remove_service:%d",
2999 		service->state->id, service->localport);
3000 
3001 	if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
3002 		unlock_service(service);
3003 		return VCHIQ_ERROR;
3004 	}
3005 
3006 	mark_service_closing(service);
3007 
3008 	if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3009 		(current == service->state->slot_handler_thread)) {
3010 		/*
3011 		 * Make it look like a client, because it must be removed and
3012 		 * not left in the LISTENING state.
3013 		 */
3014 		service->public_fourcc = VCHIQ_FOURCC_INVALID;
3015 
3016 		status = vchiq_close_service_internal(service,
3017 			0/*!close_recvd*/);
3018 		WARN_ON(status == VCHIQ_RETRY);
3019 	} else {
3020 		/* Mark the service for removal by the slot handler */
3021 		request_poll(service->state, service, VCHIQ_POLL_REMOVE);
3022 	}
3023 	while (1) {
3024 		if (wait_for_completion_interruptible(&service->remove_event)) {
3025 			status = VCHIQ_RETRY;
3026 			break;
3027 		}
3028 
3029 		if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
3030 			(service->srvstate == VCHIQ_SRVSTATE_OPEN))
3031 			break;
3032 
3033 		vchiq_log_warning(vchiq_core_log_level,
3034 			"%d: remove_service:%d - waiting in state %s",
3035 			service->state->id, service->localport,
3036 			srvstate_names[service->srvstate]);
3037 	}
3038 
3039 	if ((status == VCHIQ_SUCCESS) &&
3040 		(service->srvstate != VCHIQ_SRVSTATE_FREE))
3041 		status = VCHIQ_ERROR;
3042 
3043 	unlock_service(service);
3044 
3045 	return status;
3046 }
3047 
3048 /*
3049  * This function may be called by kernel threads or user threads.
3050  * User threads may receive VCHIQ_RETRY to indicate that a signal has been
3051  * received and the call should be retried after being returned to user
3052  * context.
3053  * When called in blocking mode, the userdata field points to a bulk_waiter
3054  * structure.
3055  */
3056 enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
3057 				   void *offset, void __user *uoffset,
3058 				   int size, void *userdata,
3059 				   enum vchiq_bulk_mode mode,
3060 				   enum vchiq_bulk_dir dir)
3061 {
3062 	struct vchiq_service *service = find_service_by_handle(handle);
3063 	struct vchiq_bulk_queue *queue;
3064 	struct vchiq_bulk *bulk;
3065 	struct vchiq_state *state;
3066 	struct bulk_waiter *bulk_waiter = NULL;
3067 	const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
3068 	const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
3069 		VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
3070 	enum vchiq_status status = VCHIQ_ERROR;
3071 	int payload[2];
3072 
3073 	if (!service || service->srvstate != VCHIQ_SRVSTATE_OPEN ||
3074 	    (!offset && !uoffset) ||
3075 	    vchiq_check_service(service) != VCHIQ_SUCCESS)
3076 		goto error_exit;
3077 
3078 	switch (mode) {
3079 	case VCHIQ_BULK_MODE_NOCALLBACK:
3080 	case VCHIQ_BULK_MODE_CALLBACK:
3081 		break;
3082 	case VCHIQ_BULK_MODE_BLOCKING:
3083 		bulk_waiter = userdata;
3084 		init_completion(&bulk_waiter->event);
3085 		bulk_waiter->actual = 0;
3086 		bulk_waiter->bulk = NULL;
3087 		break;
3088 	case VCHIQ_BULK_MODE_WAITING:
3089 		bulk_waiter = userdata;
3090 		bulk = bulk_waiter->bulk;
3091 		goto waiting;
3092 	default:
3093 		goto error_exit;
3094 	}
3095 
3096 	state = service->state;
3097 
3098 	queue = (dir == VCHIQ_BULK_TRANSMIT) ?
3099 		&service->bulk_tx : &service->bulk_rx;
3100 
3101 	if (mutex_lock_killable(&service->bulk_mutex)) {
3102 		status = VCHIQ_RETRY;
3103 		goto error_exit;
3104 	}
3105 
3106 	if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
3107 		VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
3108 		do {
3109 			mutex_unlock(&service->bulk_mutex);
3110 			if (wait_for_completion_interruptible(
3111 						&service->bulk_remove_event)) {
3112 				status = VCHIQ_RETRY;
3113 				goto error_exit;
3114 			}
3115 			if (mutex_lock_killable(&service->bulk_mutex)) {
3116 				status = VCHIQ_RETRY;
3117 				goto error_exit;
3118 			}
3119 		} while (queue->local_insert == queue->remove +
3120 				VCHIQ_NUM_SERVICE_BULKS);
3121 	}
3122 
3123 	bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
3124 
3125 	bulk->mode = mode;
3126 	bulk->dir = dir;
3127 	bulk->userdata = userdata;
3128 	bulk->size = size;
3129 	bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
3130 
3131 	if (vchiq_prepare_bulk_data(bulk, offset, uoffset, size, dir)
3132 			!= VCHIQ_SUCCESS)
3133 		goto unlock_error_exit;
3134 
3135 	wmb();
3136 
3137 	vchiq_log_info(vchiq_core_log_level,
3138 		"%d: bt (%d->%d) %cx %x@%pad %pK",
3139 		state->id, service->localport, service->remoteport, dir_char,
3140 		size, &bulk->data, userdata);
3141 
3142 	/*
3143 	 * The slot mutex must be held when the service is being closed, so
3144 	 * claim it here to ensure that isn't happening
3145 	 */
3146 	if (mutex_lock_killable(&state->slot_mutex)) {
3147 		status = VCHIQ_RETRY;
3148 		goto cancel_bulk_error_exit;
3149 	}
3150 
3151 	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3152 		goto unlock_both_error_exit;
3153 
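	/*
	 * The message payload carries the low 32 bits of the bulk
	 * buffer's DMA address and its length.
	 */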
3154 	payload[0] = lower_32_bits(bulk->data);
3155 	payload[1] = bulk->size;
3156 	status = queue_message(state,
3157 			       NULL,
3158 			       VCHIQ_MAKE_MSG(dir_msgtype,
3159 					      service->localport,
3160 					      service->remoteport),
3161 			       memcpy_copy_callback,
3162 			       &payload,
3163 			       sizeof(payload),
3164 			       QMFLAGS_IS_BLOCKING |
3165 			       QMFLAGS_NO_MUTEX_LOCK |
3166 			       QMFLAGS_NO_MUTEX_UNLOCK);
3167 	if (status != VCHIQ_SUCCESS)
3168 		goto unlock_both_error_exit;
3169 
3170 	queue->local_insert++;
3171 
3172 	mutex_unlock(&state->slot_mutex);
3173 	mutex_unlock(&service->bulk_mutex);
3174 
3175 	vchiq_log_trace(vchiq_core_log_level,
3176 		"%d: bt:%d %cx li=%x ri=%x p=%x",
3177 		state->id,
3178 		service->localport, dir_char,
3179 		queue->local_insert, queue->remote_insert, queue->process);
3180 
3181 waiting:
3182 	unlock_service(service);
3183 
3184 	status = VCHIQ_SUCCESS;
3185 
3186 	if (bulk_waiter) {
3187 		bulk_waiter->bulk = bulk;
3188 		if (wait_for_completion_interruptible(&bulk_waiter->event))
3189 			status = VCHIQ_RETRY;
3190 		else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
3191 			status = VCHIQ_ERROR;
3192 	}
3193 
3194 	return status;
3195 
3196 unlock_both_error_exit:
3197 	mutex_unlock(&state->slot_mutex);
3198 cancel_bulk_error_exit:
3199 	vchiq_complete_bulk(bulk);
3200 unlock_error_exit:
3201 	mutex_unlock(&service->bulk_mutex);
3202 
3203 error_exit:
3204 	if (service)
3205 		unlock_service(service);
3206 	return status;
3207 }
3208 
3209 enum vchiq_status
3210 vchiq_queue_message(unsigned int handle,
3211 		    ssize_t (*copy_callback)(void *context, void *dest,
3212 					     size_t offset, size_t maxsize),
3213 		    void *context,
3214 		    size_t size)
3215 {
3216 	struct vchiq_service *service = find_service_by_handle(handle);
3217 	enum vchiq_status status = VCHIQ_ERROR;
3218 
3219 	if (!service ||
3220 		(vchiq_check_service(service) != VCHIQ_SUCCESS))
3221 		goto error_exit;
3222 
3223 	if (!size) {
3224 		VCHIQ_SERVICE_STATS_INC(service, error_count);
		goto error_exit;
	}
3228 
3229 	if (size > VCHIQ_MAX_MSG_SIZE) {
3230 		VCHIQ_SERVICE_STATS_INC(service, error_count);
3231 		goto error_exit;
3232 	}
3233 
3234 	switch (service->srvstate) {
3235 	case VCHIQ_SRVSTATE_OPEN:
3236 		status = queue_message(service->state, service,
3237 				VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3238 					service->localport,
3239 					service->remoteport),
3240 				copy_callback, context, size, 1);
3241 		break;
3242 	case VCHIQ_SRVSTATE_OPENSYNC:
3243 		status = queue_message_sync(service->state, service,
3244 				VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3245 					service->localport,
3246 					service->remoteport),
3247 				copy_callback, context, size, 1);
3248 		break;
3249 	default:
3250 		status = VCHIQ_ERROR;
3251 		break;
3252 	}
3253 
3254 error_exit:
3255 	if (service)
3256 		unlock_service(service);
3257 
3258 	return status;
3259 }
3260 
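/*
 * Blocking wrapper around vchiq_queue_message() for in-kernel callers:
 * keeps retrying (with a short sleep) until the message has been
 * queued or a hard error occurs, e.g.
 *
 *	status = vchiq_queue_kernel_message(handle, &msg, sizeof(msg));
 */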
3261 int vchiq_queue_kernel_message(unsigned int handle, void *data, unsigned int size)
3262 {
3263 	enum vchiq_status status;
3264 
3265 	while (1) {
3266 		status = vchiq_queue_message(handle, memcpy_copy_callback,
3267 					     data, size);
3268 
3269 		/*
3270 		 * vchiq_queue_message() may return VCHIQ_RETRY, so we need to
3271 		 * implement a retry mechanism since this function is supposed
3272 		 * to block until queued
3273 		 */
3274 		if (status != VCHIQ_RETRY)
3275 			break;
3276 
3277 		msleep(1);
3278 	}
3279 
3280 	return status;
3281 }
3282 EXPORT_SYMBOL(vchiq_queue_kernel_message);
3283 
3284 void
3285 vchiq_release_message(unsigned int handle,
3286 		      struct vchiq_header *header)
3287 {
3288 	struct vchiq_service *service = find_service_by_handle(handle);
3289 	struct vchiq_shared_state *remote;
3290 	struct vchiq_state *state;
3291 	int slot_index;
3292 
3293 	if (!service)
3294 		return;
3295 
3296 	state = service->state;
3297 	remote = state->remote;
3298 
3299 	slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
3300 
3301 	if ((slot_index >= remote->slot_first) &&
3302 		(slot_index <= remote->slot_last)) {
3303 		int msgid = header->msgid;
3304 
3305 		if (msgid & VCHIQ_MSGID_CLAIMED) {
3306 			struct vchiq_slot_info *slot_info =
3307 				SLOT_INFO_FROM_INDEX(state, slot_index);
3308 
3309 			release_slot(state, slot_info, header, service);
3310 		}
3311 	} else if (slot_index == remote->slot_sync)
3312 		release_message_sync(state, header);
3313 
3314 	unlock_service(service);
3315 }
3316 EXPORT_SYMBOL(vchiq_release_message);
3317 
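/*
 * Mark the synchronous slot as free again and signal the remote so
 * that it can send its next synchronous message.
 */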
3318 static void
3319 release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
3320 {
3321 	header->msgid = VCHIQ_MSGID_PADDING;
3322 	remote_event_signal(&state->remote->sync_release);
3323 }
3324 
3325 enum vchiq_status
3326 vchiq_get_peer_version(unsigned int handle, short *peer_version)
3327 {
3328 	enum vchiq_status status = VCHIQ_ERROR;
3329 	struct vchiq_service *service = find_service_by_handle(handle);
3330 
3331 	if (!service ||
3332 	    (vchiq_check_service(service) != VCHIQ_SUCCESS) ||
3333 	    !peer_version)
3334 		goto exit;
3335 	*peer_version = service->peer_version;
3336 	status = VCHIQ_SUCCESS;
3337 
3338 exit:
3339 	if (service)
3340 		unlock_service(service);
3341 	return status;
3342 }
3343 EXPORT_SYMBOL(vchiq_get_peer_version);
3344 
3345 void vchiq_get_config(struct vchiq_config *config)
3346 {
3347 	config->max_msg_size           = VCHIQ_MAX_MSG_SIZE;
3348 	config->bulk_threshold         = VCHIQ_MAX_MSG_SIZE;
3349 	config->max_outstanding_bulks  = VCHIQ_NUM_SERVICE_BULKS;
3350 	config->max_services           = VCHIQ_MAX_SERVICES;
3351 	config->version                = VCHIQ_VERSION;
3352 	config->version_min            = VCHIQ_VERSION_MIN;
3353 }
3354 
3355 enum vchiq_status
3356 vchiq_set_service_option(unsigned int handle,
3357 	enum vchiq_service_option option, int value)
3358 {
3359 	struct vchiq_service *service = find_service_by_handle(handle);
3360 	enum vchiq_status status = VCHIQ_ERROR;
3361 
3362 	if (service) {
3363 		switch (option) {
3364 		case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
3365 			service->auto_close = value;
3366 			status = VCHIQ_SUCCESS;
3367 			break;
3368 
3369 		case VCHIQ_SERVICE_OPTION_SLOT_QUOTA: {
3370 			struct vchiq_service_quota *service_quota =
3371 				&service->state->service_quotas[
3372 					service->localport];
3373 			if (value == 0)
3374 				value = service->state->default_slot_quota;
3375 			if ((value >= service_quota->slot_use_count) &&
3376 				 (value < (unsigned short)~0)) {
3377 				service_quota->slot_quota = value;
3378 				if ((value >= service_quota->slot_use_count) &&
3379 					(service_quota->message_quota >=
3380 					 service_quota->message_use_count)) {
3381 					/*
3382 					 * Signal the service that it may have
3383 					 * dropped below its quota
3384 					 */
3385 					complete(&service_quota->quota_event);
3386 				}
3387 				status = VCHIQ_SUCCESS;
3388 			}
3389 		} break;
3390 
3391 		case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA: {
3392 			struct vchiq_service_quota *service_quota =
3393 				&service->state->service_quotas[
3394 					service->localport];
3395 			if (value == 0)
3396 				value = service->state->default_message_quota;
3397 			if ((value >= service_quota->message_use_count) &&
3398 				 (value < (unsigned short)~0)) {
3399 				service_quota->message_quota = value;
3400 				if ((value >=
3401 					service_quota->message_use_count) &&
3402 					(service_quota->slot_quota >=
3403 					service_quota->slot_use_count))
3404 					/*
3405 					 * Signal the service that it may have
3406 					 * dropped below its quota
3407 					 */
3408 					complete(&service_quota->quota_event);
3409 				status = VCHIQ_SUCCESS;
3410 			}
3411 		} break;
3412 
3413 		case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
3414 			if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3415 				(service->srvstate ==
3416 				VCHIQ_SRVSTATE_LISTENING)) {
3417 				service->sync = value;
3418 				status = VCHIQ_SUCCESS;
3419 			}
3420 			break;
3421 
3422 		case VCHIQ_SERVICE_OPTION_TRACE:
3423 			service->trace = value;
3424 			status = VCHIQ_SUCCESS;
3425 			break;
3426 
3427 		default:
3428 			break;
3429 		}
3430 		unlock_service(service);
3431 	}
3432 
3433 	return status;
3434 }
3435 
3436 static int
3437 vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
3438 			struct vchiq_shared_state *shared, const char *label)
3439 {
3440 	static const char *const debug_names[] = {
3441 		"<entries>",
3442 		"SLOT_HANDLER_COUNT",
3443 		"SLOT_HANDLER_LINE",
3444 		"PARSE_LINE",
3445 		"PARSE_HEADER",
3446 		"PARSE_MSGID",
3447 		"AWAIT_COMPLETION_LINE",
3448 		"DEQUEUE_MESSAGE_LINE",
3449 		"SERVICE_CALLBACK_LINE",
3450 		"MSG_QUEUE_FULL_COUNT",
3451 		"COMPLETION_QUEUE_FULL_COUNT"
3452 	};
3453 	int i;
3454 	char buf[80];
3455 	int len;
3456 	int err;
3457 
3458 	len = scnprintf(buf, sizeof(buf),
3459 		"  %s: slots %d-%d tx_pos=%x recycle=%x",
3460 		label, shared->slot_first, shared->slot_last,
3461 		shared->tx_pos, shared->slot_queue_recycle);
3462 	err = vchiq_dump(dump_context, buf, len + 1);
3463 	if (err)
3464 		return err;
3465 
3466 	len = scnprintf(buf, sizeof(buf),
3467 		"    Slots claimed:");
3468 	err = vchiq_dump(dump_context, buf, len + 1);
3469 	if (err)
3470 		return err;
3471 
3472 	for (i = shared->slot_first; i <= shared->slot_last; i++) {
3473 		struct vchiq_slot_info slot_info =
3474 						*SLOT_INFO_FROM_INDEX(state, i);
3475 		if (slot_info.use_count != slot_info.release_count) {
3476 			len = scnprintf(buf, sizeof(buf),
3477 				"      %d: %d/%d", i, slot_info.use_count,
3478 				slot_info.release_count);
3479 			err = vchiq_dump(dump_context, buf, len + 1);
3480 			if (err)
3481 				return err;
3482 		}
3483 	}
3484 
3485 	for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
3486 		len = scnprintf(buf, sizeof(buf), "    DEBUG: %s = %d(%x)",
3487 			debug_names[i], shared->debug[i], shared->debug[i]);
3488 		err = vchiq_dump(dump_context, buf, len + 1);
3489 		if (err)
3490 			return err;
3491 	}
3492 	return 0;
3493 }
3494 
3495 int vchiq_dump_state(void *dump_context, struct vchiq_state *state)
3496 {
3497 	char buf[80];
3498 	int len;
3499 	int i;
3500 	int err;
3501 
3502 	len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id,
3503 		conn_state_names[state->conn_state]);
3504 	err = vchiq_dump(dump_context, buf, len + 1);
3505 	if (err)
3506 		return err;
3507 
3508 	len = scnprintf(buf, sizeof(buf),
3509 		"  tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
3510 		state->local->tx_pos,
3511 		state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
3512 		state->rx_pos,
3513 		state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
3514 	err = vchiq_dump(dump_context, buf, len + 1);
3515 	if (err)
3516 		return err;
3517 
3518 	len = scnprintf(buf, sizeof(buf),
3519 		"  Version: %d (min %d)",
3520 		VCHIQ_VERSION, VCHIQ_VERSION_MIN);
3521 	err = vchiq_dump(dump_context, buf, len + 1);
3522 	if (err)
3523 		return err;
3524 
3525 	if (VCHIQ_ENABLE_STATS) {
3526 		len = scnprintf(buf, sizeof(buf),
3527 			"  Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d",
3528 			state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
3529 			state->stats.error_count);
3530 		err = vchiq_dump(dump_context, buf, len + 1);
3531 		if (err)
3532 			return err;
3533 	}
3534 
3535 	len = scnprintf(buf, sizeof(buf),
3536 		"  Slots: %d available (%d data), %d recyclable, %d stalls (%d data)",
3537 		((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
3538 			state->local_tx_pos) / VCHIQ_SLOT_SIZE,
3539 		state->data_quota - state->data_use_count,
3540 		state->local->slot_queue_recycle - state->slot_queue_available,
3541 		state->stats.slot_stalls, state->stats.data_stalls);
3542 	err = vchiq_dump(dump_context, buf, len + 1);
3543 	if (err)
3544 		return err;
3545 
3546 	err = vchiq_dump_platform_state(dump_context);
3547 	if (err)
3548 		return err;
3549 
3550 	err = vchiq_dump_shared_state(dump_context,
3551 				      state,
3552 				      state->local,
3553 				      "Local");
3554 	if (err)
3555 		return err;
3556 	err = vchiq_dump_shared_state(dump_context,
3557 				      state,
3558 				      state->remote,
3559 				      "Remote");
3560 	if (err)
3561 		return err;
3562 
3563 	err = vchiq_dump_platform_instances(dump_context);
3564 	if (err)
3565 		return err;
3566 
3567 	for (i = 0; i < state->unused_service; i++) {
3568 		struct vchiq_service *service = find_service_by_port(state, i);
3569 
3570 		if (service) {
3571 			err = vchiq_dump_service_state(dump_context, service);
3572 			unlock_service(service);
3573 			if (err)
3574 				return err;
3575 		}
3576 	}
3577 	return 0;
3578 }
3579 
3580 int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
3581 {
3582 	char buf[80];
3583 	int len;
3584 	int err;
3585 	unsigned int ref_count;
3586 
	/* Don't include the lock just taken */
3588 	ref_count = kref_read(&service->ref_count) - 1;
3589 	len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
3590 			service->localport, srvstate_names[service->srvstate],
3591 			ref_count);
3592 
3593 	if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
3594 		char remoteport[30];
3595 		struct vchiq_service_quota *service_quota =
3596 			&service->state->service_quotas[service->localport];
3597 		int fourcc = service->base.fourcc;
3598 		int tx_pending, rx_pending;
3599 
3600 		if (service->remoteport != VCHIQ_PORT_FREE) {
3601 			int len2 = scnprintf(remoteport, sizeof(remoteport),
3602 				"%u", service->remoteport);
3603 
3604 			if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
3605 				scnprintf(remoteport + len2,
3606 					sizeof(remoteport) - len2,
3607 					" (client %x)", service->client_id);
3608 		} else
3609 			strcpy(remoteport, "n/a");
3610 
3611 		len += scnprintf(buf + len, sizeof(buf) - len,
3612 			" '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
3613 			VCHIQ_FOURCC_AS_4CHARS(fourcc),
3614 			remoteport,
3615 			service_quota->message_use_count,
3616 			service_quota->message_quota,
3617 			service_quota->slot_use_count,
3618 			service_quota->slot_quota);
3619 
3620 		err = vchiq_dump(dump_context, buf, len + 1);
3621 		if (err)
3622 			return err;
3623 
3624 		tx_pending = service->bulk_tx.local_insert -
3625 			service->bulk_tx.remote_insert;
3626 
3627 		rx_pending = service->bulk_rx.local_insert -
3628 			service->bulk_rx.remote_insert;
3629 
3630 		len = scnprintf(buf, sizeof(buf),
3631 			"  Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)",
3632 			tx_pending,
3633 			tx_pending ? service->bulk_tx.bulks[
3634 			BULK_INDEX(service->bulk_tx.remove)].size : 0,
3635 			rx_pending,
3636 			rx_pending ? service->bulk_rx.bulks[
3637 			BULK_INDEX(service->bulk_rx.remove)].size : 0);
3638 
3639 		if (VCHIQ_ENABLE_STATS) {
3640 			err = vchiq_dump(dump_context, buf, len + 1);
3641 			if (err)
3642 				return err;
3643 
3644 			len = scnprintf(buf, sizeof(buf),
3645 				"  Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
3646 				service->stats.ctrl_tx_count,
3647 				service->stats.ctrl_tx_bytes,
3648 				service->stats.ctrl_rx_count,
3649 				service->stats.ctrl_rx_bytes);
3650 			err = vchiq_dump(dump_context, buf, len + 1);
3651 			if (err)
3652 				return err;
3653 
3654 			len = scnprintf(buf, sizeof(buf),
3655 				"  Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
3656 				service->stats.bulk_tx_count,
3657 				service->stats.bulk_tx_bytes,
3658 				service->stats.bulk_rx_count,
3659 				service->stats.bulk_rx_bytes);
3660 			err = vchiq_dump(dump_context, buf, len + 1);
3661 			if (err)
3662 				return err;
3663 
3664 			len = scnprintf(buf, sizeof(buf),
3665 				"  %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors",
3666 				service->stats.quota_stalls,
3667 				service->stats.slot_stalls,
3668 				service->stats.bulk_stalls,
3669 				service->stats.bulk_aborted_count,
3670 				service->stats.error_count);
3671 		}
3672 	}
3673 
3674 	err = vchiq_dump(dump_context, buf, len + 1);
3675 	if (err)
3676 		return err;
3677 
3678 	if (service->srvstate != VCHIQ_SRVSTATE_FREE)
3679 		err = vchiq_dump_platform_service_state(dump_context, service);
3680 	return err;
3681 }
3682 
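/*
 * vchiq_loud_error_header() and vchiq_loud_error_footer() bracket a
 * group of vchiq_log_error() calls so that serious faults stand out in
 * the log.  Typical usage:
 *
 *   vchiq_loud_error_header();
 *   vchiq_log_error(vchiq_core_log_level, "%s: fatal error", __func__);
 *   vchiq_loud_error_footer();
 */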
3683 void
3684 vchiq_loud_error_header(void)
3685 {
3686 	vchiq_log_error(vchiq_core_log_level,
3687 		"============================================================================");
3688 	vchiq_log_error(vchiq_core_log_level,
3689 		"============================================================================");
3690 	vchiq_log_error(vchiq_core_log_level, "=====");
3691 }
3692 
3693 void
3694 vchiq_loud_error_footer(void)
3695 {
3696 	vchiq_log_error(vchiq_core_log_level, "=====");
3697 	vchiq_log_error(vchiq_core_log_level,
3698 		"============================================================================");
3699 	vchiq_log_error(vchiq_core_log_level,
3700 		"============================================================================");
3701 }
3702 
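/*
 * Notify the peer of remote use / remote active use.  Both helpers queue
 * a zero-length control message; if the connection is still in the
 * DISCONNECTED state nothing is sent and VCHIQ_RETRY is returned.
 */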
3703 enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state)
3704 {
3705 	enum vchiq_status status = VCHIQ_RETRY;
3706 
3707 	if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
3708 		status = queue_message(state, NULL,
3709 			VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
3710 			NULL, NULL, 0, 0);
3711 	return status;
3712 }
3713 
3714 enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state)
3715 {
3716 	enum vchiq_status status = VCHIQ_RETRY;
3717 
3718 	if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
3719 		status = queue_message(state, NULL,
3720 			VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
3721 			NULL, NULL, 0, 0);
3722 	return status;
3723 }
3724 
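/*
 * Write a hex/ASCII dump of a memory region to the trace log, 16 bytes
 * per output line.  Illustrative call (arguments are examples only):
 *
 *   vchiq_log_dump_mem("rx", 0, header->data, header->size);
 *
 * Each line shows the running address, the hex values of up to 16 bytes
 * and their ASCII representation, with non-printable bytes as '.'.
 */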
3725 void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem,
3726 	size_t num_bytes)
3727 {
	const u8 *mem = void_mem;
	size_t offset;
	char line_buf[100];
	char *s;
3732 
3733 	while (num_bytes > 0) {
3734 		s = line_buf;
3735 
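		/* Hex column: always 16 entries, space-padded past the end. */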
3736 		for (offset = 0; offset < 16; offset++) {
3737 			if (offset < num_bytes)
3738 				s += scnprintf(s, 4, "%02x ", mem[offset]);
3739 			else
3740 				s += scnprintf(s, 4, "   ");
3741 		}
3742 
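		/* ASCII column: non-printable bytes are shown as '.'. */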
3743 		for (offset = 0; offset < 16; offset++) {
3744 			if (offset < num_bytes) {
3745 				u8 ch = mem[offset];
3746 
3747 				if ((ch < ' ') || (ch > '~'))
3748 					ch = '.';
3749 				*s++ = (char)ch;
3750 			}
3751 		}
3752 		*s++ = '\0';
3753 
3754 		if (label && (*label != '\0'))
3755 			vchiq_log_trace(VCHIQ_LOG_TRACE,
3756 				"%s: %08x: %s", label, addr, line_buf);
3757 		else
3758 			vchiq_log_trace(VCHIQ_LOG_TRACE,
3759 				"%08x: %s", addr, line_buf);
3760 
3761 		addr += 16;
3762 		mem += 16;
3763 		if (num_bytes > 16)
3764 			num_bytes -= 16;
3765 		else
3766 			num_bytes = 0;
3767 	}
3768 }
3769