/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */

#ifndef VCHIQ_CORE_H
#define VCHIQ_CORE_H

#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/raspberrypi/vchiq.h>

#include "vchiq_cfg.h"

/* Do this so that we can test-build the code on non-rpi systems */
#if IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE)

#else

#ifndef dsb
#define dsb(a)
#endif

#endif	/* IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE) */

#define VCHIQ_SERVICE_HANDLE_INVALID 0

#define VCHIQ_SLOT_SIZE     4096
#define VCHIQ_MAX_MSG_SIZE  (VCHIQ_SLOT_SIZE - sizeof(struct vchiq_header))

/* Run time control of log level, based on KERN_XXX level. */
#define VCHIQ_LOG_DEFAULT  4
#define VCHIQ_LOG_ERROR    3
#define VCHIQ_LOG_WARNING  4
#define VCHIQ_LOG_INFO     6
#define VCHIQ_LOG_TRACE    7

#define VCHIQ_LOG_PREFIX   KERN_INFO "vchiq: "

#ifndef vchiq_log_error
#define vchiq_log_error(cat, fmt, ...) \
	do { if (cat >= VCHIQ_LOG_ERROR) \
		printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
#endif
#ifndef vchiq_log_warning
#define vchiq_log_warning(cat, fmt, ...) \
	do { if (cat >= VCHIQ_LOG_WARNING) \
		printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
#endif
#ifndef vchiq_log_info
#define vchiq_log_info(cat, fmt, ...) \
	do { if (cat >= VCHIQ_LOG_INFO) \
		printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
#endif
#ifndef vchiq_log_trace
#define vchiq_log_trace(cat, fmt, ...) \
	do { if (cat >= VCHIQ_LOG_TRACE) \
		printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
#endif

#define vchiq_loud_error(...) \
	vchiq_log_error(vchiq_core_log_level, "===== " __VA_ARGS__)
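
/*
 * Illustrative usage of the logging helpers above (a sketch; the first
 * argument is one of the vchiq_*_log_level globals declared later in this
 * header, and 'ret' stands for whatever value is being reported):
 *
 *	vchiq_log_error(vchiq_core_log_level, "%s: failed (%d)", __func__, ret);
 *	vchiq_loud_error_header();
 *	vchiq_loud_error("unexpected state");
 *	vchiq_loud_error_footer();
 */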

#define VCHIQ_SLOT_MASK        (VCHIQ_SLOT_SIZE - 1)
#define VCHIQ_SLOT_QUEUE_MASK  (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
#define VCHIQ_SLOT_ZERO_SLOTS  DIV_ROUND_UP(sizeof(struct vchiq_slot_zero), \
					    VCHIQ_SLOT_SIZE)

#define VCHIQ_FOURCC_AS_4CHARS(fourcc)	\
	((fourcc) >> 24) & 0xff, \
	((fourcc) >> 16) & 0xff, \
	((fourcc) >>  8) & 0xff, \
	(fourcc) & 0xff
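
/*
 * VCHIQ_FOURCC_AS_4CHARS expands to four comma-separated character values,
 * so it pairs with a "%c%c%c%c" format string. An illustrative use, where
 * 'fourcc' is any packed service FourCC:
 *
 *	vchiq_log_info(vchiq_core_log_level, "service '%c%c%c%c' open",
 *		       VCHIQ_FOURCC_AS_4CHARS(fourcc));
 */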

#define BITSET_SIZE(b)        (((b) + 31) >> 5)
#define BITSET_WORD(b)        ((b) >> 5)
#define BITSET_BIT(b)         (1 << ((b) & 31))
#define BITSET_IS_SET(bs, b)  ((bs)[BITSET_WORD(b)] & BITSET_BIT(b))
#define BITSET_SET(bs, b)     ((bs)[BITSET_WORD(b)] |= BITSET_BIT(b))
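
/*
 * The BITSET_* macros operate on an array of 32-bit words sized with
 * BITSET_SIZE(). A minimal sketch using a plain u32 array (the
 * poll_services field of struct vchiq_state below stores the same layout
 * in atomic_t words):
 *
 *	u32 flags[BITSET_SIZE(VCHIQ_MAX_SERVICES)] = { 0 };
 *
 *	BITSET_SET(flags, localport);
 *	if (BITSET_IS_SET(flags, localport))
 *		... service 'localport' is marked ...
 */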

enum {
	DEBUG_ENTRIES,
#if VCHIQ_ENABLE_DEBUG
	DEBUG_SLOT_HANDLER_COUNT,
	DEBUG_SLOT_HANDLER_LINE,
	DEBUG_PARSE_LINE,
	DEBUG_PARSE_HEADER,
	DEBUG_PARSE_MSGID,
	DEBUG_AWAIT_COMPLETION_LINE,
	DEBUG_DEQUEUE_MESSAGE_LINE,
	DEBUG_SERVICE_CALLBACK_LINE,
	DEBUG_MSG_QUEUE_FULL_COUNT,
	DEBUG_COMPLETION_QUEUE_FULL_COUNT,
#endif
	DEBUG_MAX
};

#if VCHIQ_ENABLE_DEBUG

#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug
#define DEBUG_TRACE(d) \
	do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(sy); } while (0)
#define DEBUG_VALUE(d, v) \
	do { debug_ptr[DEBUG_ ## d] = (v); dsb(sy); } while (0)
#define DEBUG_COUNT(d) \
	do { debug_ptr[DEBUG_ ## d]++; dsb(sy); } while (0)

#else /* VCHIQ_ENABLE_DEBUG */

#define DEBUG_INITIALISE(local)
#define DEBUG_TRACE(d)
#define DEBUG_VALUE(d, v)
#define DEBUG_COUNT(d)

#endif /* VCHIQ_ENABLE_DEBUG */
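
/*
 * Illustrative use of the DEBUG_* helpers (a sketch; 'local' is the
 * struct vchiq_shared_state owned by this side, 'msgid' is whatever value
 * is being recorded, and the argument is an enum value above with its
 * DEBUG_ prefix dropped, since the macros paste it back on):
 *
 *	DEBUG_INITIALISE(local);
 *	DEBUG_TRACE(PARSE_LINE);
 *	DEBUG_VALUE(PARSE_MSGID, msgid);
 *	DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
 */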

enum vchiq_connstate {
	VCHIQ_CONNSTATE_DISCONNECTED,
	VCHIQ_CONNSTATE_CONNECTING,
	VCHIQ_CONNSTATE_CONNECTED,
	VCHIQ_CONNSTATE_PAUSING,
	VCHIQ_CONNSTATE_PAUSE_SENT,
	VCHIQ_CONNSTATE_PAUSED,
	VCHIQ_CONNSTATE_RESUMING,
	VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
	VCHIQ_CONNSTATE_RESUME_TIMEOUT
};

enum {
	VCHIQ_SRVSTATE_FREE,
	VCHIQ_SRVSTATE_HIDDEN,
	VCHIQ_SRVSTATE_LISTENING,
	VCHIQ_SRVSTATE_OPENING,
	VCHIQ_SRVSTATE_OPEN,
	VCHIQ_SRVSTATE_OPENSYNC,
	VCHIQ_SRVSTATE_CLOSESENT,
	VCHIQ_SRVSTATE_CLOSERECVD,
	VCHIQ_SRVSTATE_CLOSEWAIT,
	VCHIQ_SRVSTATE_CLOSED
};

enum vchiq_bulk_dir {
	VCHIQ_BULK_TRANSMIT,
	VCHIQ_BULK_RECEIVE
};

struct vchiq_bulk {
	short mode;
	short dir;
	void *userdata;
	dma_addr_t data;
	int size;
	void *remote_data;
	int remote_size;
	int actual;
};

struct vchiq_bulk_queue {
	int local_insert;  /* Where to insert the next local bulk */
	int remote_insert; /* Where to insert the next remote bulk (master) */
	int process;       /* Bulk to transfer next */
	int remote_notify; /* Bulk to notify the remote client of next (mstr) */
	int remove;        /* Bulk to notify the local client of, and remove, next */
	struct vchiq_bulk bulks[VCHIQ_NUM_SERVICE_BULKS];
};
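
/*
 * The indices above are free-running; they are assumed to be reduced
 * modulo VCHIQ_NUM_SERVICE_BULKS (a power of two) when indexing bulks[].
 * A minimal sketch of that wrap-around:
 *
 *	struct vchiq_bulk *bulk =
 *		&queue->bulks[queue->local_insert & (VCHIQ_NUM_SERVICE_BULKS - 1)];
 */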

struct remote_event {
	int armed;
	int fired;
	u32 __unused;
};

struct opaque_platform_state;

struct vchiq_slot {
	char data[VCHIQ_SLOT_SIZE];
};

struct vchiq_slot_info {
	/* Use two counters rather than one to avoid the need for a mutex. */
	short use_count;
	short release_count;
};

struct vchiq_service {
	struct vchiq_service_base base;
	unsigned int handle;
	struct kref ref_count;
	struct rcu_head rcu;
	int srvstate;
	void (*userdata_term)(void *userdata);
	unsigned int localport;
	unsigned int remoteport;
	int public_fourcc;
	int client_id;
	char auto_close;
	char sync;
	char closing;
	char trace;
	atomic_t poll_flags;
	short version;
	short version_min;
	short peer_version;

	struct vchiq_state *state;
	struct vchiq_instance *instance;

	int service_use_count;

	struct vchiq_bulk_queue bulk_tx;
	struct vchiq_bulk_queue bulk_rx;

	struct completion remove_event;
	struct completion bulk_remove_event;
	struct mutex bulk_mutex;

	struct service_stats_struct {
		int quota_stalls;
		int slot_stalls;
		int bulk_stalls;
		int error_count;
		int ctrl_tx_count;
		int ctrl_rx_count;
		int bulk_tx_count;
		int bulk_rx_count;
		int bulk_aborted_count;
		u64 ctrl_tx_bytes;
		u64 ctrl_rx_bytes;
		u64 bulk_tx_bytes;
		u64 bulk_rx_bytes;
	} stats;

	int msg_queue_read;
	int msg_queue_write;
	struct completion msg_queue_pop;
	struct completion msg_queue_push;
	struct vchiq_header *msg_queue[VCHIQ_MAX_SLOTS];
};

/*
 * The quota information is outside struct vchiq_service so that it can
 * be statically allocated, since for accounting reasons a service's slot
 * usage is carried over between users of the same port number.
 */
struct vchiq_service_quota {
	unsigned short slot_quota;
	unsigned short slot_use_count;
	unsigned short message_quota;
	unsigned short message_use_count;
	struct completion quota_event;
	int previous_tx_index;
};

struct vchiq_shared_state {
	/* A non-zero value here indicates that the content is valid. */
	int initialised;

	/* The first and last (inclusive) slots allocated to the owner. */
	int slot_first;
	int slot_last;

	/* The slot allocated to synchronous messages from the owner. */
	int slot_sync;

	/*
	 * Signalling this event indicates that the owner's slot handler
	 * thread should run.
	 */
	struct remote_event trigger;

	/*
	 * Indicates the byte position within the stream where the next message
	 * will be written. The least significant bits are an index into the
	 * slot. The next bits are the index of the slot in slot_queue.
	 */
	int tx_pos;

	/* This event should be signalled when a slot is recycled. */
	struct remote_event recycle;

	/* The slot_queue index where the next recycled slot will be written. */
	int slot_queue_recycle;

	/* This event should be signalled when a synchronous message is sent. */
	struct remote_event sync_trigger;

	/*
	 * This event should be signalled when a synchronous message has been
	 * released.
	 */
	struct remote_event sync_release;

	/* A circular buffer of slot indexes. */
	int slot_queue[VCHIQ_MAX_SLOTS_PER_SIDE];

	/* Debugging state */
	int debug[DEBUG_MAX];
};
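
/*
 * Illustrative decoding of a stream position such as tx_pos (a sketch based
 * on the comment above, not necessarily the helpers vchiq_core.c uses):
 *
 *	int slot_offset = pos & VCHIQ_SLOT_MASK;
 *	int queue_index = (pos / VCHIQ_SLOT_SIZE) & VCHIQ_SLOT_QUEUE_MASK;
 *	int slot_index  = slot_queue[queue_index];
 */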

struct vchiq_slot_zero {
	int magic;
	short version;
	short version_min;
	int slot_zero_size;
	int slot_size;
	int max_slots;
	int max_slots_per_side;
	int platform_data[2];
	struct vchiq_shared_state master;
	struct vchiq_shared_state slave;
	struct vchiq_slot_info slots[VCHIQ_MAX_SLOTS];
};

struct vchiq_state {
	struct device *dev;
	int id;
	int initialised;
	enum vchiq_connstate conn_state;
	short version_common;

	struct vchiq_shared_state *local;
	struct vchiq_shared_state *remote;
	struct vchiq_slot *slot_data;

	unsigned short default_slot_quota;
	unsigned short default_message_quota;

	/* Event indicating connect message received */
	struct completion connect;

	/* Mutex protecting services */
	struct mutex mutex;
	struct vchiq_instance **instance;

	/* Processes incoming messages */
	struct task_struct *slot_handler_thread;

	/* Processes recycled slots */
	struct task_struct *recycle_thread;

	/* Processes synchronous messages */
	struct task_struct *sync_thread;

	/* Local implementation of the trigger remote event */
	wait_queue_head_t trigger_event;

	/* Local implementation of the recycle remote event */
	wait_queue_head_t recycle_event;

	/* Local implementation of the sync trigger remote event */
	wait_queue_head_t sync_trigger_event;

	/* Local implementation of the sync release remote event */
	wait_queue_head_t sync_release_event;

	char *tx_data;
	char *rx_data;
	struct vchiq_slot_info *rx_info;

	struct mutex slot_mutex;

	struct mutex recycle_mutex;

	struct mutex sync_mutex;

	struct mutex bulk_transfer_mutex;

	/*
	 * Indicates the byte position within the stream from where the next
	 * message will be read. The least significant bits are an index into
	 * the slot. The next bits are the index of the slot in
	 * remote->slot_queue.
	 */
	int rx_pos;

	/*
	 * A cached copy of local->tx_pos. Only write to local->tx_pos, and read
	 * from remote->tx_pos.
	 */
	int local_tx_pos;

	/* The slot_queue index of the slot to become available next. */
	int slot_queue_available;

	/* A flag to indicate if any poll has been requested */
	int poll_needed;

	/* The index of the previous slot used for data messages. */
	int previous_data_index;

	/* The number of slots occupied by data messages. */
	unsigned short data_use_count;

	/* The maximum number of slots to be occupied by data messages. */
	unsigned short data_quota;

	/* An array of bit sets indicating which services must be polled. */
	atomic_t poll_services[BITSET_SIZE(VCHIQ_MAX_SERVICES)];

	/* The number of the first unused service */
	int unused_service;

	/* Signalled when a free slot becomes available. */
	struct completion slot_available_event;

	struct completion slot_remove_event;

	/* Signalled when a free data slot becomes available. */
	struct completion data_quota_event;

	struct state_stats_struct {
		int slot_stalls;
		int data_stalls;
		int ctrl_tx_count;
		int ctrl_rx_count;
		int error_count;
	} stats;

	struct vchiq_service __rcu *services[VCHIQ_MAX_SERVICES];
	struct vchiq_service_quota service_quotas[VCHIQ_MAX_SERVICES];
	struct vchiq_slot_info slot_info[VCHIQ_MAX_SLOTS];

	struct opaque_platform_state *platform_state;
};

struct bulk_waiter {
	struct vchiq_bulk *bulk;
	struct completion event;
	int actual;
};

struct vchiq_config {
	unsigned int max_msg_size;
	unsigned int bulk_threshold;	/* The message size above which it
					 * is better to use a bulk transfer
					 * (<= max_msg_size)
					 */
	unsigned int max_outstanding_bulks;
	unsigned int max_services;
	short version;      /* The version of VCHIQ */
	short version_min;  /* The minimum compatible version of VCHIQ */
};

extern spinlock_t bulk_waiter_spinlock;

extern int vchiq_core_log_level;
extern int vchiq_core_msg_log_level;
extern int vchiq_sync_log_level;

extern const char *
get_conn_state_name(enum vchiq_connstate conn_state);

extern struct vchiq_slot_zero *
vchiq_init_slots(void *mem_base, int mem_size);
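
/*
 * Illustrative bring-up sequence (a sketch; error handling abbreviated,
 * and 'mem_base'/'mem_size' describe the memory region shared with the
 * VideoCore):
 *
 *	struct vchiq_slot_zero *slot_zero = vchiq_init_slots(mem_base, mem_size);
 *
 *	if (!slot_zero)
 *		return -ENOMEM;
 *
 *	ret = vchiq_init_state(state, slot_zero, dev);
 */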

extern int
vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, struct device *dev);

extern enum vchiq_status
vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance);

struct vchiq_service *
vchiq_add_service_internal(struct vchiq_state *state,
			   const struct vchiq_service_params_kernel *params,
			   int srvstate, struct vchiq_instance *instance,
			   void (*userdata_term)(void *userdata));

extern enum vchiq_status
vchiq_open_service_internal(struct vchiq_service *service, int client_id);

extern enum vchiq_status
vchiq_close_service_internal(struct vchiq_service *service, int close_recvd);

extern void
vchiq_terminate_service_internal(struct vchiq_service *service);

extern void
vchiq_free_service_internal(struct vchiq_service *service);

extern void
vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance);

extern void
remote_event_pollall(struct vchiq_state *state);

extern enum vchiq_status
vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *offset,
		    void __user *uoffset, int size, void *userdata, enum vchiq_bulk_mode mode,
		    enum vchiq_bulk_dir dir);

extern int
vchiq_dump_state(void *dump_context, struct vchiq_state *state);

extern int
vchiq_dump_service_state(void *dump_context, struct vchiq_service *service);

extern void
vchiq_loud_error_header(void);

extern void
vchiq_loud_error_footer(void);

extern void
request_poll(struct vchiq_state *state, struct vchiq_service *service,
	     int poll_type);

struct vchiq_service *handle_to_service(struct vchiq_instance *instance, unsigned int handle);

extern struct vchiq_service *
find_service_by_handle(struct vchiq_instance *instance, unsigned int handle);

extern struct vchiq_service *
find_service_by_port(struct vchiq_state *state, unsigned int localport);

extern struct vchiq_service *
find_service_for_instance(struct vchiq_instance *instance, unsigned int handle);

extern struct vchiq_service *
find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int handle);

extern struct vchiq_service *
__next_service_by_instance(struct vchiq_state *state,
			   struct vchiq_instance *instance,
			   int *pidx);

extern struct vchiq_service *
next_service_by_instance(struct vchiq_state *state,
			 struct vchiq_instance *instance,
			 int *pidx);

extern void
vchiq_service_get(struct vchiq_service *service);

extern void
vchiq_service_put(struct vchiq_service *service);
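
/*
 * Typical reference-counting pattern around the lookup helpers above (a
 * sketch; the find_* helpers return a service with a reference held, which
 * the caller drops with vchiq_service_put()):
 *
 *	struct vchiq_service *service = find_service_by_handle(instance, handle);
 *
 *	if (!service)
 *		return -EINVAL;
 *	... use the service ...
 *	vchiq_service_put(service);
 */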

extern enum vchiq_status
vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle,
		    ssize_t (*copy_callback)(void *context, void *dest,
					     size_t offset, size_t maxsize),
		    void *context,
		    size_t size);
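
/*
 * Minimal sketch of a copy_callback for vchiq_queue_message() that copies
 * out of a contiguous kernel buffer passed as the context (the name is
 * illustrative, not a helper provided by the driver):
 *
 *	static ssize_t copy_from_context(void *context, void *dest,
 *					 size_t offset, size_t maxsize)
 *	{
 *		memcpy(dest, (char *)context + offset, maxsize);
 *		return maxsize;
 *	}
 */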

int vchiq_prepare_bulk_data(struct vchiq_instance *instance, struct vchiq_bulk *bulk, void *offset,
			    void __user *uoffset, int size, int dir);

void vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk);

void remote_event_signal(struct remote_event *event);

int vchiq_dump(void *dump_context, const char *str, int len);

int vchiq_dump_platform_state(void *dump_context);

int vchiq_dump_platform_instances(void *dump_context);

int vchiq_dump_platform_service_state(void *dump_context, struct vchiq_service *service);

int vchiq_use_service_internal(struct vchiq_service *service);

int vchiq_release_service_internal(struct vchiq_service *service);

void vchiq_on_remote_use(struct vchiq_state *state);

void vchiq_on_remote_release(struct vchiq_state *state);

int vchiq_platform_init_state(struct vchiq_state *state);

enum vchiq_status vchiq_check_service(struct vchiq_service *service);

void vchiq_on_remote_use_active(struct vchiq_state *state);

enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state);

enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state);

void vchiq_platform_conn_state_changed(struct vchiq_state *state,
				       enum vchiq_connstate oldstate,
				       enum vchiq_connstate newstate);

void vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate);

void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem, size_t num_bytes);

enum vchiq_status vchiq_remove_service(struct vchiq_instance *instance, unsigned int service);

int vchiq_get_client_id(struct vchiq_instance *instance, unsigned int service);

void vchiq_get_config(struct vchiq_config *config);
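
/*
 * Illustrative query of the transport limits (a sketch):
 *
 *	struct vchiq_config config;
 *
 *	vchiq_get_config(&config);
 *	if (size >= config.bulk_threshold)
 *		... prefer a bulk transfer for this payload ...
 */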

int vchiq_set_service_option(struct vchiq_instance *instance, unsigned int service,
			     enum vchiq_service_option option, int value);

#endif