1 /* AF_RXRPC internal definitions
2  *
3  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4  * Written by David Howells (dhowells@redhat.com)
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  */
11 
12 #include <linux/atomic.h>
13 #include <linux/seqlock.h>
14 #include <net/sock.h>
15 #include <net/af_rxrpc.h>
16 #include <rxrpc/packet.h>
17 
18 #if 0
19 #define CHECK_SLAB_OKAY(X)				     \
20 	BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
21 	       (POISON_FREE << 8 | POISON_FREE))
22 #else
23 #define CHECK_SLAB_OKAY(X) do {} while (0)
24 #endif
25 
26 #define FCRYPT_BSIZE 8
27 struct rxrpc_crypt {
28 	union {
29 		u8	x[FCRYPT_BSIZE];
30 		__be32	n[2];
31 	};
32 } __attribute__((aligned(8)));
33 
34 #define rxrpc_queue_work(WS)	queue_work(rxrpc_workqueue, (WS))
35 #define rxrpc_queue_delayed_work(WS,D)	\
36 	queue_delayed_work(rxrpc_workqueue, (WS), (D))
37 
38 struct rxrpc_connection;
39 
40 /*
41  * Mark applied to socket buffers.
42  */
43 enum rxrpc_skb_mark {
44 	RXRPC_SKB_MARK_DATA,		/* data message */
45 	RXRPC_SKB_MARK_FINAL_ACK,	/* final ACK received message */
46 	RXRPC_SKB_MARK_BUSY,		/* server busy message */
47 	RXRPC_SKB_MARK_REMOTE_ABORT,	/* remote abort message */
48 	RXRPC_SKB_MARK_LOCAL_ABORT,	/* local abort message */
49 	RXRPC_SKB_MARK_NET_ERROR,	/* network error message */
50 	RXRPC_SKB_MARK_LOCAL_ERROR,	/* local error message */
51 	RXRPC_SKB_MARK_NEW_CALL,	/* new incoming call notification */
52 };
53 
54 /*
55  * sk_state for RxRPC sockets
56  */
57 enum {
58 	RXRPC_UNBOUND = 0,
59 	RXRPC_CLIENT_UNBOUND,		/* Unbound socket used as client */
60 	RXRPC_CLIENT_BOUND,		/* client local address bound */
61 	RXRPC_SERVER_BOUND,		/* server local address bound */
62 	RXRPC_SERVER_LISTENING,		/* server listening for connections */
63 	RXRPC_CLOSE,			/* socket is being closed */
64 };
65 
66 /*
67  * Service backlog preallocation.
68  *
69  * This contains circular buffers of preallocated peers, connections and calls
70  * for incoming service calls and their head and tail pointers.  This allows
71  * calls to be set up in the data_ready handler, thereby avoiding the need to
72  * shuffle packets around so much.
73  */
74 struct rxrpc_backlog {
75 	unsigned short		peer_backlog_head;
76 	unsigned short		peer_backlog_tail;
77 	unsigned short		conn_backlog_head;
78 	unsigned short		conn_backlog_tail;
79 	unsigned short		call_backlog_head;
80 	unsigned short		call_backlog_tail;
81 #define RXRPC_BACKLOG_MAX	32
82 	struct rxrpc_peer	*peer_backlog[RXRPC_BACKLOG_MAX];
83 	struct rxrpc_connection	*conn_backlog[RXRPC_BACKLOG_MAX];
84 	struct rxrpc_call	*call_backlog[RXRPC_BACKLOG_MAX];
85 };
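
/*
 * Illustrative sketch of how the preallocation rings above can be used.
 * RXRPC_BACKLOG_MAX is a power of two, so head/tail indices wrap with a
 * simple mask.  The helper name is hypothetical; the real producer in
 * call_accept.c additionally uses READ_ONCE()/smp_store_release() for
 * ordering:
 *
 *	static bool example_backlog_add_peer(struct rxrpc_backlog *b,
 *					     struct rxrpc_peer *peer)
 *	{
 *		unsigned short head = b->peer_backlog_head;
 *		unsigned short next = (head + 1) & (RXRPC_BACKLOG_MAX - 1);
 *
 *		if (next == b->peer_backlog_tail)
 *			return false;		// ring is full
 *		b->peer_backlog[head] = peer;
 *		b->peer_backlog_head = next;
 *		return true;
 *	}
 */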
86 
87 /*
88  * RxRPC socket definition
89  */
90 struct rxrpc_sock {
91 	/* WARNING: sk has to be the first member */
92 	struct sock		sk;
93 	rxrpc_notify_new_call_t	notify_new_call; /* Func to notify of new call */
94 	rxrpc_discard_new_call_t discard_new_call; /* Func to discard a new call */
95 	struct rxrpc_local	*local;		/* local endpoint */
96 	struct hlist_node	listen_link;	/* link in the local endpoint's listen list */
97 	struct rxrpc_backlog	*backlog;	/* Preallocation for services */
98 	spinlock_t		incoming_lock;	/* Incoming call vs service shutdown lock */
99 	struct list_head	sock_calls;	/* List of calls owned by this socket */
100 	struct list_head	to_be_accepted;	/* calls awaiting acceptance */
101 	struct list_head	recvmsg_q;	/* Calls awaiting recvmsg's attention  */
102 	rwlock_t		recvmsg_lock;	/* Lock for recvmsg_q */
103 	struct key		*key;		/* security for this socket */
104 	struct key		*securities;	/* list of server security descriptors */
105 	struct rb_root		calls;		/* User ID -> call mapping */
106 	unsigned long		flags;
107 #define RXRPC_SOCK_CONNECTED		0	/* connect_srx is set */
108 	rwlock_t		call_lock;	/* lock for calls */
109 	u32			min_sec_level;	/* minimum security level */
110 #define RXRPC_SECURITY_MAX	RXRPC_SECURITY_ENCRYPT
111 	bool			exclusive;	/* Exclusive connection for a client socket */
112 	sa_family_t		family;		/* Protocol family created with */
113 	struct sockaddr_rxrpc	srx;		/* local address */
114 	struct sockaddr_rxrpc	connect_srx;	/* Default client address from connect() */
115 };
116 
117 #define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)
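
/*
 * Because struct sock is the first member of struct rxrpc_sock, the accessor
 * above recovers the enclosing record with container_of().  Illustrative use
 * (the function name is hypothetical):
 *
 *	static void example_use(struct socket *sock)
 *	{
 *		struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
 *
 *		if (test_bit(RXRPC_SOCK_CONNECTED, &rx->flags))
 *			;	// connect_srx holds the default peer address
 *	}
 */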
118 
119 /*
120  * CPU-byteorder normalised Rx packet header.
121  */
122 struct rxrpc_host_header {
123 	u32		epoch;		/* client boot timestamp */
124 	u32		cid;		/* connection and channel ID */
125 	u32		callNumber;	/* call ID (0 for connection-level packets) */
126 	u32		seq;		/* sequence number of pkt in call stream */
127 	u32		serial;		/* serial number of pkt sent to network */
128 	u8		type;		/* packet type */
129 	u8		flags;		/* packet flags */
130 	u8		userStatus;	/* app-layer defined status */
131 	u8		securityIndex;	/* security protocol ID */
132 	union {
133 		u16	_rsvd;		/* reserved */
134 		u16	cksum;		/* kerberos security checksum */
135 	};
136 	u16		serviceId;	/* service ID */
137 } __packed;
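
/*
 * Sketch of how the on-the-wire header (struct rxrpc_wire_header in
 * <rxrpc/packet.h>, big-endian fields) is normalised into the CPU-order form
 * above.  The helper name is hypothetical; the real conversion in input.c
 * also copies the header out of the skb first:
 *
 *	static void example_to_host(struct rxrpc_host_header *hdr,
 *				    const struct rxrpc_wire_header *whdr)
 *	{
 *		hdr->epoch		= ntohl(whdr->epoch);
 *		hdr->cid		= ntohl(whdr->cid);
 *		hdr->callNumber		= ntohl(whdr->callNumber);
 *		hdr->seq		= ntohl(whdr->seq);
 *		hdr->serial		= ntohl(whdr->serial);
 *		hdr->type		= whdr->type;
 *		hdr->flags		= whdr->flags;
 *		hdr->userStatus		= whdr->userStatus;
 *		hdr->securityIndex	= whdr->securityIndex;
 *		hdr->_rsvd		= ntohs(whdr->_rsvd);
 *		hdr->serviceId		= ntohs(whdr->serviceId);
 *	}
 */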
138 
139 /*
140  * RxRPC socket buffer private variables
141  * - max 48 bytes (struct sk_buff::cb)
142  */
143 struct rxrpc_skb_priv {
144 	union {
145 		unsigned long	resend_at;	/* time in jiffies at which to resend */
146 		struct {
147 			u8	nr_jumbo;	/* Number of jumbo subpackets */
148 		};
149 	};
150 	union {
151 		unsigned int	offset;		/* offset into buffer of next read */
152 		int		remain;		/* amount of space remaining for next write */
153 		u32		error;		/* network error code */
154 	};
155 
156 	struct rxrpc_host_header hdr;		/* RxRPC packet header from this packet */
157 };
158 
159 #define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
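
/*
 * Typical use of the accessor above: rxrpc_skb() aliases the skb's control
 * buffer as the private state, e.g.
 *
 *	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 *
 *	_proto("Rx DATA %%%u { #%u }", sp->hdr.serial, sp->hdr.seq);
 *
 * The struct must stay within the 48-byte sk_buff::cb area noted above.
 */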
160 
161 /*
162  * RxRPC security module interface
163  */
164 struct rxrpc_security {
165 	const char		*name;		/* name of this service */
166 	u8			security_index;	/* security type provided */
167 
168 	/* Initialise a security service */
169 	int (*init)(void);
170 
171 	/* Clean up a security service */
172 	void (*exit)(void);
173 
174 	/* initialise a connection's security */
175 	int (*init_connection_security)(struct rxrpc_connection *);
176 
177 	/* prime a connection's packet security */
178 	int (*prime_packet_security)(struct rxrpc_connection *);
179 
180 	/* impose security on a packet */
181 	int (*secure_packet)(struct rxrpc_call *,
182 			     struct sk_buff *,
183 			     size_t,
184 			     void *);
185 
186 	/* verify the security on a received packet */
187 	int (*verify_packet)(struct rxrpc_call *, struct sk_buff *,
188 			     unsigned int, unsigned int, rxrpc_seq_t, u16);
189 
190 	/* Locate the data in a received packet that has been verified. */
191 	void (*locate_data)(struct rxrpc_call *, struct sk_buff *,
192 			    unsigned int *, unsigned int *);
193 
194 	/* issue a challenge */
195 	int (*issue_challenge)(struct rxrpc_connection *);
196 
197 	/* respond to a challenge */
198 	int (*respond_to_challenge)(struct rxrpc_connection *,
199 				    struct sk_buff *,
200 				    u32 *);
201 
202 	/* verify a response */
203 	int (*verify_response)(struct rxrpc_connection *,
204 			       struct sk_buff *,
205 			       u32 *);
206 
207 	/* clear connection security */
208 	void (*clear)(struct rxrpc_connection *);
209 };
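
/*
 * The simplest instance of this interface is the null module declared below
 * as rxrpc_no_security (insecure.c).  A minimal table has roughly this shape
 * (hook and variable names here are hypothetical):
 *
 *	static int example_init_connection_security(struct rxrpc_connection *conn)
 *	{
 *		return 0;	// nothing to set up
 *	}
 *
 *	static const struct rxrpc_security example_no_security = {
 *		.name			= "none",
 *		.security_index		= RXRPC_SECURITY_NONE,
 *		.init_connection_security = example_init_connection_security,
 *		// remaining mandatory hooks filled in similarly
 *	};
 */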
210 
211 /*
212  * RxRPC local transport endpoint description
213  * - owned by a single AF_RXRPC socket
214  * - pointed to by transport socket struct sk_user_data
215  */
216 struct rxrpc_local {
217 	struct rcu_head		rcu;
218 	atomic_t		usage;
219 	struct list_head	link;
220 	struct socket		*socket;	/* my UDP socket */
221 	struct work_struct	processor;
222 	struct hlist_head	services;	/* services listening on this endpoint */
223 	struct rw_semaphore	defrag_sem;	/* control re-enablement of IP DF bit */
224 	struct sk_buff_head	reject_queue;	/* packets awaiting rejection */
225 	struct sk_buff_head	event_queue;	/* endpoint event packets awaiting processing */
226 	struct rb_root		client_conns;	/* Client connections by socket params */
227 	spinlock_t		client_conns_lock; /* Lock for client_conns */
228 	spinlock_t		lock;		/* access lock */
229 	rwlock_t		services_lock;	/* lock for services list */
230 	int			debug_id;	/* debug ID for printks */
231 	bool			dead;
232 	struct sockaddr_rxrpc	srx;		/* local address */
233 };
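
/*
 * As noted above, the transport socket's sk_user_data points back at the
 * local endpoint, so handlers such as the data_ready callback can recover it
 * directly (illustrative fragment):
 *
 *	struct rxrpc_local *local = sk->sk_user_data;
 */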
234 
235 /*
236  * RxRPC remote transport endpoint definition
237  * - matched by local endpoint, remote port, address and protocol type
238  */
239 struct rxrpc_peer {
240 	struct rcu_head		rcu;		/* This must be first */
241 	atomic_t		usage;
242 	unsigned long		hash_key;
243 	struct hlist_node	hash_link;
244 	struct rxrpc_local	*local;
245 	struct hlist_head	error_targets;	/* targets for net error distribution */
246 	struct work_struct	error_distributor;
247 	struct rb_root		service_conns;	/* Service connections */
248 	seqlock_t		service_conn_lock;
249 	spinlock_t		lock;		/* access lock */
250 	unsigned int		if_mtu;		/* interface MTU for this peer */
251 	unsigned int		mtu;		/* network MTU for this peer */
252 	unsigned int		maxdata;	/* data size (MTU - hdrsize) */
253 	unsigned short		hdrsize;	/* header size (IP + UDP + RxRPC) */
254 	int			debug_id;	/* debug ID for printks */
255 	int			error_report;	/* Net (+0) or local (+1000000) to distribute */
256 #define RXRPC_LOCAL_ERROR_OFFSET 1000000
257 	struct sockaddr_rxrpc	srx;		/* remote address */
258 
259 	/* calculated RTT cache */
260 #define RXRPC_RTT_CACHE_SIZE 32
261 	u64			rtt;		/* Current RTT estimate (in ns) */
262 	u64			rtt_sum;	/* Sum of cache contents */
263 	u64			rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* Determined RTT cache */
264 	u8			rtt_cursor;	/* next entry at which to insert */
265 	u8			rtt_usage;	/* amount of cache actually used */
266 };
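
/*
 * Sketch of how the RTT cache above is kept as a rolling sum/average
 * (simplified from the rxrpc_peer_add_rtt() logic in peer_event.c; the
 * function name is hypothetical and locking is omitted):
 *
 *	static void example_add_rtt(struct rxrpc_peer *peer, u64 rtt_ns)
 *	{
 *		if (peer->rtt_usage < RXRPC_RTT_CACHE_SIZE)
 *			peer->rtt_usage++;
 *		else
 *			peer->rtt_sum -= peer->rtt_cache[peer->rtt_cursor];
 *
 *		peer->rtt_cache[peer->rtt_cursor] = rtt_ns;
 *		peer->rtt_cursor = (peer->rtt_cursor + 1) &
 *				   (RXRPC_RTT_CACHE_SIZE - 1);
 *		peer->rtt_sum += rtt_ns;
 *		peer->rtt = div64_u64(peer->rtt_sum, peer->rtt_usage);
 *	}
 */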
267 
268 /*
269  * Keys for matching a connection.
270  */
271 struct rxrpc_conn_proto {
272 	union {
273 		struct {
274 			u32	epoch;		/* epoch of this connection */
275 			u32	cid;		/* connection ID */
276 		};
277 		u64		index_key;
278 	};
279 };
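
/*
 * Because epoch/cid overlay index_key, a connection can be matched in an
 * rb-tree with one 64-bit comparison.  Illustrative lookup fragment (the
 * real versions live in conn_client.c and conn_service.c):
 *
 *	struct rxrpc_conn_proto k;
 *	struct rxrpc_connection *conn = NULL;
 *	struct rb_node *p = peer->service_conns.rb_node;
 *
 *	k.epoch	= sp->hdr.epoch;
 *	k.cid	= sp->hdr.cid & RXRPC_CIDMASK;
 *
 *	while (p) {
 *		conn = rb_entry(p, struct rxrpc_connection, service_node);
 *		if (k.index_key < conn->proto.index_key)
 *			p = p->rb_left;
 *		else if (k.index_key > conn->proto.index_key)
 *			p = p->rb_right;
 *		else
 *			break;		// found it
 *	}
 */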
280 
281 struct rxrpc_conn_parameters {
282 	struct rxrpc_local	*local;		/* Representation of local endpoint */
283 	struct rxrpc_peer	*peer;		/* Remote endpoint */
284 	struct key		*key;		/* Security details */
285 	bool			exclusive;	/* T if conn is exclusive */
286 	u16			service_id;	/* Service ID for this connection */
287 	u32			security_level;	/* Security level selected */
288 };
289 
290 /*
291  * Bits in the connection flags.
292  */
293 enum rxrpc_conn_flag {
294 	RXRPC_CONN_HAS_IDR,		/* Has a client conn ID assigned */
295 	RXRPC_CONN_IN_SERVICE_CONNS,	/* Conn is in peer->service_conns */
296 	RXRPC_CONN_IN_CLIENT_CONNS,	/* Conn is in local->client_conns */
297 	RXRPC_CONN_EXPOSED,		/* Conn has extra ref for exposure */
298 	RXRPC_CONN_DONT_REUSE,		/* Don't reuse this connection */
299 	RXRPC_CONN_COUNTED,		/* Counted by rxrpc_nr_client_conns */
300 };
301 
302 /*
303  * Events that can be raised upon a connection.
304  */
305 enum rxrpc_conn_event {
306 	RXRPC_CONN_EV_CHALLENGE,	/* Send challenge packet */
307 };
308 
309 /*
310  * The connection cache state.
311  */
312 enum rxrpc_conn_cache_state {
313 	RXRPC_CONN_CLIENT_INACTIVE,	/* Conn is not yet listed */
314 	RXRPC_CONN_CLIENT_WAITING,	/* Conn is on wait list, waiting for capacity */
315 	RXRPC_CONN_CLIENT_ACTIVE,	/* Conn is on active list, doing calls */
316 	RXRPC_CONN_CLIENT_CULLED,	/* Conn is culled and delisted, doing calls */
317 	RXRPC_CONN_CLIENT_IDLE,		/* Conn is on idle list, doing mostly nothing */
318 	RXRPC_CONN__NR_CACHE_STATES
319 };
320 
321 /*
322  * The connection protocol state.
323  */
324 enum rxrpc_conn_proto_state {
325 	RXRPC_CONN_UNUSED,		/* Connection not yet attempted */
326 	RXRPC_CONN_CLIENT,		/* Client connection */
327 	RXRPC_CONN_SERVICE_PREALLOC,	/* Service connection preallocation */
328 	RXRPC_CONN_SERVICE_UNSECURED,	/* Service unsecured connection */
329 	RXRPC_CONN_SERVICE_CHALLENGING,	/* Service challenging for security */
330 	RXRPC_CONN_SERVICE,		/* Service secured connection */
331 	RXRPC_CONN_REMOTELY_ABORTED,	/* Conn aborted by peer */
332 	RXRPC_CONN_LOCALLY_ABORTED,	/* Conn aborted locally */
333 	RXRPC_CONN__NR_STATES
334 };
335 
336 /*
337  * RxRPC connection definition
338  * - matched by { local, peer, epoch, conn_id, direction }
339  * - each connection can only handle four simultaneous calls
340  */
341 struct rxrpc_connection {
342 	struct rxrpc_conn_proto	proto;
343 	struct rxrpc_conn_parameters params;
344 
345 	atomic_t		usage;
346 	struct rcu_head		rcu;
347 	struct list_head	cache_link;
348 
349 	spinlock_t		channel_lock;
350 	unsigned char		active_chans;	/* Mask of active channels */
351 #define RXRPC_ACTIVE_CHANS_MASK	((1 << RXRPC_MAXCALLS) - 1)
352 	struct list_head	waiting_calls;	/* Calls waiting for channels */
353 	struct rxrpc_channel {
354 		struct rxrpc_call __rcu	*call;		/* Active call */
355 		u32			call_id;	/* ID of current call */
356 		u32			call_counter;	/* Call ID counter */
357 		u32			last_call;	/* ID of last call */
358 		u8			last_type;	/* Type of last packet */
359 		u16			last_service_id;
360 		union {
361 			u32		last_seq;
362 			u32		last_abort;
363 		};
364 	} channels[RXRPC_MAXCALLS];
365 
366 	struct work_struct	processor;	/* connection event processor */
367 	union {
368 		struct rb_node	client_node;	/* Node in local->client_conns */
369 		struct rb_node	service_node;	/* Node in peer->service_conns */
370 	};
371 	struct list_head	proc_link;	/* link in procfs list */
372 	struct list_head	link;		/* link in master connection list */
373 	struct sk_buff_head	rx_queue;	/* received conn-level packets */
374 	const struct rxrpc_security *security;	/* applied security module */
375 	struct key		*server_key;	/* security for this service */
376 	struct crypto_skcipher	*cipher;	/* encryption handle */
377 	struct rxrpc_crypt	csum_iv;	/* packet checksum base */
378 	unsigned long		flags;
379 	unsigned long		events;
380 	unsigned long		idle_timestamp;	/* Time at which last became idle */
381 	spinlock_t		state_lock;	/* state-change lock */
382 	enum rxrpc_conn_cache_state cache_state;
383 	enum rxrpc_conn_proto_state state;	/* current state of connection */
384 	u32			local_abort;	/* local abort code */
385 	u32			remote_abort;	/* remote abort code */
386 	int			debug_id;	/* debug ID for printks */
387 	atomic_t		serial;		/* packet serial number counter */
388 	unsigned int		hi_serial;	/* highest serial number received */
389 	u32			security_nonce;	/* response re-use preventer */
390 	u8			size_align;	/* data size alignment (for security) */
391 	u8			security_size;	/* security header size */
392 	u8			security_ix;	/* security type */
393 	u8			out_clientflag;	/* RXRPC_CLIENT_INITIATED if we are client */
394 };
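
/*
 * Each connection multiplexes up to RXRPC_MAXCALLS calls; the low bits of the
 * connection ID select the channel.  Illustrative fragment (RXRPC_CHANNELMASK
 * comes from <rxrpc/packet.h>):
 *
 *	unsigned int chan = sp->hdr.cid & RXRPC_CHANNELMASK;
 *	struct rxrpc_channel *channel = &conn->channels[chan];
 *	struct rxrpc_call *call = rcu_dereference(channel->call);
 */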
395 
396 /*
397  * Flags in call->flags.
398  */
399 enum rxrpc_call_flag {
400 	RXRPC_CALL_RELEASED,		/* call has been released - no more message to userspace */
401 	RXRPC_CALL_HAS_USERID,		/* has a user ID attached */
402 	RXRPC_CALL_IS_SERVICE,		/* Call is service call */
403 	RXRPC_CALL_EXPOSED,		/* The call was exposed to the world */
404 	RXRPC_CALL_RX_LAST,		/* Received the last packet (at rxtx_top) */
405 	RXRPC_CALL_TX_LAST,		/* Last packet in Tx buffer (at rxtx_top) */
406 };
407 
408 /*
409  * Events that can be raised on a call.
410  */
411 enum rxrpc_call_event {
412 	RXRPC_CALL_EV_ACK,		/* need to generate ACK */
413 	RXRPC_CALL_EV_ABORT,		/* need to generate abort */
414 	RXRPC_CALL_EV_TIMER,		/* Timer expired */
415 	RXRPC_CALL_EV_RESEND,		/* Tx resend required */
416 };
417 
418 /*
419  * The states that a call can be in.
420  */
421 enum rxrpc_call_state {
422 	RXRPC_CALL_UNINITIALISED,
423 	RXRPC_CALL_CLIENT_AWAIT_CONN,	/* - client waiting for connection to become available */
424 	RXRPC_CALL_CLIENT_SEND_REQUEST,	/* - client sending request phase */
425 	RXRPC_CALL_CLIENT_AWAIT_REPLY,	/* - client awaiting reply */
426 	RXRPC_CALL_CLIENT_RECV_REPLY,	/* - client receiving reply phase */
427 	RXRPC_CALL_SERVER_PREALLOC,	/* - service preallocation */
428 	RXRPC_CALL_SERVER_SECURING,	/* - server securing request connection */
429 	RXRPC_CALL_SERVER_ACCEPTING,	/* - server accepting request */
430 	RXRPC_CALL_SERVER_RECV_REQUEST,	/* - server receiving request */
431 	RXRPC_CALL_SERVER_ACK_REQUEST,	/* - server pending ACK of request */
432 	RXRPC_CALL_SERVER_SEND_REPLY,	/* - server sending reply */
433 	RXRPC_CALL_SERVER_AWAIT_ACK,	/* - server awaiting final ACK */
434 	RXRPC_CALL_COMPLETE,		/* - call complete */
435 	NR__RXRPC_CALL_STATES
436 };
437 
438 /*
439  * Call completion condition (state == RXRPC_CALL_COMPLETE).
440  */
441 enum rxrpc_call_completion {
442 	RXRPC_CALL_SUCCEEDED,		/* - Normal termination */
443 	RXRPC_CALL_REMOTELY_ABORTED,	/* - call aborted by peer */
444 	RXRPC_CALL_LOCALLY_ABORTED,	/* - call aborted locally on error or close */
445 	RXRPC_CALL_LOCAL_ERROR,		/* - call failed due to local error */
446 	RXRPC_CALL_NETWORK_ERROR,	/* - call terminated by network error */
447 	NR__RXRPC_CALL_COMPLETIONS
448 };
449 
450 /*
451  * RxRPC call definition
452  * - matched by { connection, call_id }
453  */
454 struct rxrpc_call {
455 	struct rcu_head		rcu;
456 	struct rxrpc_connection	*conn;		/* connection carrying call */
457 	struct rxrpc_peer	*peer;		/* Peer record for remote address */
458 	struct rxrpc_sock __rcu	*socket;	/* socket responsible */
459 	unsigned long		ack_at;		/* When deferred ACK needs to happen */
460 	unsigned long		resend_at;	/* When next resend needs to happen */
461 	unsigned long		expire_at;	/* When the call times out */
462 	struct timer_list	timer;		/* Combined event timer */
463 	struct work_struct	processor;	/* Event processor */
464 	rxrpc_notify_rx_t	notify_rx;	/* kernel service Rx notification function */
465 	struct list_head	link;		/* link in master call list */
466 	struct list_head	chan_wait_link;	/* Link in conn->waiting_calls */
467 	struct hlist_node	error_link;	/* link in error distribution list */
468 	struct list_head	accept_link;	/* Link in rx->acceptq */
469 	struct list_head	recvmsg_link;	/* Link in rx->recvmsg_q */
470 	struct list_head	sock_link;	/* Link in rx->sock_calls */
471 	struct rb_node		sock_node;	/* Node in rx->calls */
472 	struct sk_buff		*tx_pending;	/* Tx socket buffer being filled */
473 	wait_queue_head_t	waitq;		/* Wait queue for channel or Tx */
474 	__be32			crypto_buf[2];	/* Temporary packet crypto buffer */
475 	unsigned long		user_call_ID;	/* user-defined call ID */
476 	unsigned long		flags;
477 	unsigned long		events;
478 	spinlock_t		lock;
479 	rwlock_t		state_lock;	/* lock for state transition */
480 	u32			abort_code;	/* Local/remote abort code */
481 	int			error;		/* Local error incurred */
482 	enum rxrpc_call_state	state;		/* current state of call */
483 	enum rxrpc_call_completion completion;	/* Call completion condition */
484 	atomic_t		usage;
485 	u16			service_id;	/* service ID */
486 	u8			security_ix;	/* Security type */
487 	u32			call_id;	/* call ID on connection  */
488 	u32			cid;		/* connection ID plus channel index */
489 	int			debug_id;	/* debug ID for printks */
490 
491 	/* Rx/Tx circular buffer, depending on phase.
492 	 *
493 	 * In the Rx phase, packets are annotated with 0 or the number of the
494 	 * segment of a jumbo packet each buffer refers to.  There can be up to
495 	 * 47 segments in a maximum-size UDP packet.
496 	 *
497 	 * In the Tx phase, packets are annotated with which buffers have been
498 	 * acked.
499 	 */
500 #define RXRPC_RXTX_BUFF_SIZE	64
501 #define RXRPC_RXTX_BUFF_MASK	(RXRPC_RXTX_BUFF_SIZE - 1)
502 #define RXRPC_INIT_RX_WINDOW_SIZE 32
503 	struct sk_buff		**rxtx_buffer;
504 	u8			*rxtx_annotations;
505 #define RXRPC_TX_ANNO_ACK	0
506 #define RXRPC_TX_ANNO_UNACK	1
507 #define RXRPC_TX_ANNO_NAK	2
508 #define RXRPC_TX_ANNO_RETRANS	3
509 #define RXRPC_TX_ANNO_MASK	0x03
510 #define RXRPC_TX_ANNO_RESENT	0x04
511 #define RXRPC_RX_ANNO_JUMBO	0x3f		/* Jumbo subpacket number + 1 if not zero */
512 #define RXRPC_RX_ANNO_JLAST	0x40		/* Set if last element of a jumbo packet */
513 #define RXRPC_RX_ANNO_VERIFIED	0x80		/* Set if verified and decrypted */
514 	rxrpc_seq_t		tx_hard_ack;	/* Dead slot in buffer; the first transmitted but
515 						 * not hard-ACK'd packet follows this.
516 						 */
517 	rxrpc_seq_t		tx_top;		/* Highest Tx slot allocated. */
518 	rxrpc_seq_t		rx_hard_ack;	/* Dead slot in buffer; the first received but not
519 						 * consumed packet follows this.
520 						 */
521 	rxrpc_seq_t		rx_top;		/* Highest Rx slot allocated. */
522 	rxrpc_seq_t		rx_expect_next;	/* Expected next packet sequence number */
523 	u8			rx_winsize;	/* Size of Rx window */
524 	u8			tx_winsize;	/* Maximum size of Tx window */
525 	bool			tx_phase;	/* T if transmission phase, F if receive phase */
526 	u8			nr_jumbo_bad;	/* Number of jumbo dups/exceeds-windows */
527 
528 	/* receive-phase ACK management */
529 	u8			ackr_reason;	/* reason to ACK */
530 	u16			ackr_skew;	/* skew on packet being ACK'd */
531 	rxrpc_serial_t		ackr_serial;	/* serial of packet being ACK'd */
532 	rxrpc_seq_t		ackr_prev_seq;	/* previous sequence number received */
533 	unsigned short		rx_pkt_offset;	/* Current recvmsg packet offset */
534 	unsigned short		rx_pkt_len;	/* Current recvmsg packet len */
535 
536 	/* transmission-phase ACK management */
537 	rxrpc_serial_t		acks_latest;	/* serial number of latest ACK received */
538 };
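
/*
 * The rxtx_buffer/rxtx_annotations arrays above form a ring indexed by masked
 * sequence number, so no modulo division is needed.  Illustrative fragment
 * (the real accessors are in input.c, recvmsg.c and sendmsg.c):
 *
 *	int ix = seq & RXRPC_RXTX_BUFF_MASK;
 *	struct sk_buff *skb = call->rxtx_buffer[ix];
 *	u8 annotation = call->rxtx_annotations[ix];
 *
 *	if (annotation & RXRPC_RX_ANNO_JUMBO)
 *		;	// jumbo subpacket (annotation & RXRPC_RX_ANNO_JUMBO) - 1
 */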
539 
540 enum rxrpc_skb_trace {
541 	rxrpc_skb_rx_cleaned,
542 	rxrpc_skb_rx_freed,
543 	rxrpc_skb_rx_got,
544 	rxrpc_skb_rx_lost,
545 	rxrpc_skb_rx_received,
546 	rxrpc_skb_rx_rotated,
547 	rxrpc_skb_rx_purged,
548 	rxrpc_skb_rx_seen,
549 	rxrpc_skb_tx_cleaned,
550 	rxrpc_skb_tx_freed,
551 	rxrpc_skb_tx_got,
552 	rxrpc_skb_tx_lost,
553 	rxrpc_skb_tx_new,
554 	rxrpc_skb_tx_rotated,
555 	rxrpc_skb_tx_seen,
556 	rxrpc_skb__nr_trace
557 };
558 
559 extern const char rxrpc_skb_traces[rxrpc_skb__nr_trace][7];
560 
561 enum rxrpc_conn_trace {
562 	rxrpc_conn_new_client,
563 	rxrpc_conn_new_service,
564 	rxrpc_conn_queued,
565 	rxrpc_conn_seen,
566 	rxrpc_conn_got,
567 	rxrpc_conn_put_client,
568 	rxrpc_conn_put_service,
569 	rxrpc_conn__nr_trace
570 };
571 
572 extern const char rxrpc_conn_traces[rxrpc_conn__nr_trace][4];
573 
574 enum rxrpc_client_trace {
575 	rxrpc_client_activate_chans,
576 	rxrpc_client_alloc,
577 	rxrpc_client_chan_activate,
578 	rxrpc_client_chan_disconnect,
579 	rxrpc_client_chan_pass,
580 	rxrpc_client_chan_unstarted,
581 	rxrpc_client_cleanup,
582 	rxrpc_client_count,
583 	rxrpc_client_discard,
584 	rxrpc_client_duplicate,
585 	rxrpc_client_exposed,
586 	rxrpc_client_replace,
587 	rxrpc_client_to_active,
588 	rxrpc_client_to_culled,
589 	rxrpc_client_to_idle,
590 	rxrpc_client_to_inactive,
591 	rxrpc_client_to_waiting,
592 	rxrpc_client_uncount,
593 	rxrpc_client__nr_trace
594 };
595 
596 extern const char rxrpc_client_traces[rxrpc_client__nr_trace][7];
597 extern const char rxrpc_conn_cache_states[RXRPC_CONN__NR_CACHE_STATES][5];
598 
599 enum rxrpc_call_trace {
600 	rxrpc_call_new_client,
601 	rxrpc_call_new_service,
602 	rxrpc_call_queued,
603 	rxrpc_call_queued_ref,
604 	rxrpc_call_seen,
605 	rxrpc_call_connected,
606 	rxrpc_call_release,
607 	rxrpc_call_got,
608 	rxrpc_call_got_userid,
609 	rxrpc_call_got_kernel,
610 	rxrpc_call_put,
611 	rxrpc_call_put_userid,
612 	rxrpc_call_put_kernel,
613 	rxrpc_call_put_noqueue,
614 	rxrpc_call_error,
615 	rxrpc_call__nr_trace
616 };
617 
618 extern const char rxrpc_call_traces[rxrpc_call__nr_trace][4];
619 
620 enum rxrpc_transmit_trace {
621 	rxrpc_transmit_wait,
622 	rxrpc_transmit_queue,
623 	rxrpc_transmit_queue_reqack,
624 	rxrpc_transmit_queue_last,
625 	rxrpc_transmit_rotate,
626 	rxrpc_transmit_end,
627 	rxrpc_transmit__nr_trace
628 };
629 
630 extern const char rxrpc_transmit_traces[rxrpc_transmit__nr_trace][4];
631 
632 enum rxrpc_receive_trace {
633 	rxrpc_receive_incoming,
634 	rxrpc_receive_queue,
635 	rxrpc_receive_queue_last,
636 	rxrpc_receive_front,
637 	rxrpc_receive_rotate,
638 	rxrpc_receive_end,
639 	rxrpc_receive__nr_trace
640 };
641 
642 extern const char rxrpc_receive_traces[rxrpc_receive__nr_trace][4];
643 
644 enum rxrpc_recvmsg_trace {
645 	rxrpc_recvmsg_enter,
646 	rxrpc_recvmsg_wait,
647 	rxrpc_recvmsg_dequeue,
648 	rxrpc_recvmsg_hole,
649 	rxrpc_recvmsg_next,
650 	rxrpc_recvmsg_cont,
651 	rxrpc_recvmsg_full,
652 	rxrpc_recvmsg_data_return,
653 	rxrpc_recvmsg_terminal,
654 	rxrpc_recvmsg_to_be_accepted,
655 	rxrpc_recvmsg_return,
656 	rxrpc_recvmsg__nr_trace
657 };
658 
659 extern const char rxrpc_recvmsg_traces[rxrpc_recvmsg__nr_trace][5];
660 
661 enum rxrpc_rtt_tx_trace {
662 	rxrpc_rtt_tx_ping,
663 	rxrpc_rtt_tx__nr_trace
664 };
665 
666 extern const char rxrpc_rtt_tx_traces[rxrpc_rtt_tx__nr_trace][5];
667 
668 enum rxrpc_rtt_rx_trace {
669 	rxrpc_rtt_rx_ping_response,
670 	rxrpc_rtt_rx__nr_trace
671 };
672 
673 extern const char rxrpc_rtt_rx_traces[rxrpc_rtt_rx__nr_trace][5];
674 
675 extern const char *const rxrpc_pkts[];
676 extern const char *rxrpc_acks(u8 reason);
677 
678 #include <trace/events/rxrpc.h>
679 
680 /*
681  * af_rxrpc.c
682  */
683 extern atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
684 extern u32 rxrpc_epoch;
685 extern atomic_t rxrpc_debug_id;
686 extern struct workqueue_struct *rxrpc_workqueue;
687 
688 /*
689  * call_accept.c
690  */
691 int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
692 void rxrpc_discard_prealloc(struct rxrpc_sock *);
693 struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
694 					   struct rxrpc_connection *,
695 					   struct sk_buff *);
696 void rxrpc_accept_incoming_calls(struct rxrpc_local *);
697 struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long,
698 				     rxrpc_notify_rx_t);
699 int rxrpc_reject_call(struct rxrpc_sock *);
700 
701 /*
702  * call_event.c
703  */
704 void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool);
705 void rxrpc_process_call(struct work_struct *);
706 
707 /*
708  * call_object.c
709  */
710 extern const char *const rxrpc_call_states[];
711 extern const char *const rxrpc_call_completions[];
712 extern unsigned int rxrpc_max_call_lifetime;
713 extern struct kmem_cache *rxrpc_call_jar;
714 extern struct list_head rxrpc_calls;
715 extern rwlock_t rxrpc_call_lock;
716 
717 struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
718 struct rxrpc_call *rxrpc_alloc_call(gfp_t);
719 struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
720 					 struct rxrpc_conn_parameters *,
721 					 struct sockaddr_rxrpc *,
722 					 unsigned long, gfp_t);
723 void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
724 			 struct sk_buff *);
725 void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
726 void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
727 bool __rxrpc_queue_call(struct rxrpc_call *);
728 bool rxrpc_queue_call(struct rxrpc_call *);
729 void rxrpc_see_call(struct rxrpc_call *);
730 void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
731 void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
732 void rxrpc_cleanup_call(struct rxrpc_call *);
733 void __exit rxrpc_destroy_all_calls(void);
734 
735 static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
736 {
737 	return test_bit(RXRPC_CALL_IS_SERVICE, &call->flags);
738 }
739 
740 static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
741 {
742 	return !rxrpc_is_service_call(call);
743 }
744 
745 /*
746  * Transition a call to the complete state.
747  */
748 static inline bool __rxrpc_set_call_completion(struct rxrpc_call *call,
749 					       enum rxrpc_call_completion compl,
750 					       u32 abort_code,
751 					       int error)
752 {
753 	if (call->state < RXRPC_CALL_COMPLETE) {
754 		call->abort_code = abort_code;
755 		call->error = error;
756 		call->completion = compl;
757 		call->state = RXRPC_CALL_COMPLETE;
758 		return true;
759 	}
760 	return false;
761 }
762 
763 static inline bool rxrpc_set_call_completion(struct rxrpc_call *call,
764 					     enum rxrpc_call_completion compl,
765 					     u32 abort_code,
766 					     int error)
767 {
768 	bool ret;
769 
770 	write_lock_bh(&call->state_lock);
771 	ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
772 	write_unlock_bh(&call->state_lock);
773 	return ret;
774 }
775 
776 /*
777  * Record that a call successfully completed.
778  */
779 static inline bool __rxrpc_call_completed(struct rxrpc_call *call)
780 {
781 	return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
782 }
783 
784 static inline bool rxrpc_call_completed(struct rxrpc_call *call)
785 {
786 	bool ret;
787 
788 	write_lock_bh(&call->state_lock);
789 	ret = __rxrpc_call_completed(call);
790 	write_unlock_bh(&call->state_lock);
791 	return ret;
792 }
793 
794 /*
795  * Record that a call is locally aborted.
796  */
797 static inline bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
798 				      rxrpc_seq_t seq,
799 				      u32 abort_code, int error)
800 {
801 	trace_rxrpc_abort(why, call->cid, call->call_id, seq,
802 			  abort_code, error);
803 	return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
804 					   abort_code, error);
805 }
806 
807 static inline bool rxrpc_abort_call(const char *why, struct rxrpc_call *call,
808 				    rxrpc_seq_t seq, u32 abort_code, int error)
809 {
810 	bool ret;
811 
812 	write_lock_bh(&call->state_lock);
813 	ret = __rxrpc_abort_call(why, call, seq, abort_code, error);
814 	write_unlock_bh(&call->state_lock);
815 	return ret;
816 }
817 
818 /*
819  * conn_client.c
820  */
821 extern unsigned int rxrpc_max_client_connections;
822 extern unsigned int rxrpc_reap_client_connections;
823 extern unsigned int rxrpc_conn_idle_client_expiry;
824 extern unsigned int rxrpc_conn_idle_client_fast_expiry;
825 extern struct idr rxrpc_client_conn_ids;
826 
827 void rxrpc_destroy_client_conn_ids(void);
828 int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *,
829 		       struct sockaddr_rxrpc *, gfp_t);
830 void rxrpc_expose_client_call(struct rxrpc_call *);
831 void rxrpc_disconnect_client_call(struct rxrpc_call *);
832 void rxrpc_put_client_conn(struct rxrpc_connection *);
833 void __exit rxrpc_destroy_all_client_connections(void);
834 
835 /*
836  * conn_event.c
837  */
838 void rxrpc_process_connection(struct work_struct *);
839 
840 /*
841  * conn_object.c
842  */
843 extern unsigned int rxrpc_connection_expiry;
844 extern struct list_head rxrpc_connections;
845 extern struct list_head rxrpc_connection_proc_list;
846 extern rwlock_t rxrpc_connection_lock;
847 
848 int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);
849 struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
850 struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
851 						   struct sk_buff *);
852 void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
853 void rxrpc_disconnect_call(struct rxrpc_call *);
854 void rxrpc_kill_connection(struct rxrpc_connection *);
855 bool rxrpc_queue_conn(struct rxrpc_connection *);
856 void rxrpc_see_connection(struct rxrpc_connection *);
857 void rxrpc_get_connection(struct rxrpc_connection *);
858 struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *);
859 void rxrpc_put_service_conn(struct rxrpc_connection *);
860 void __exit rxrpc_destroy_all_connections(void);
861 
862 static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
863 {
864 	return conn->out_clientflag;
865 }
866 
867 static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn)
868 {
869 	return !rxrpc_conn_is_client(conn);
870 }
871 
872 static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
873 {
874 	if (!conn)
875 		return;
876 
877 	if (rxrpc_conn_is_client(conn))
878 		rxrpc_put_client_conn(conn);
879 	else
880 		rxrpc_put_service_conn(conn);
881 }
882 
883 /*
884  * conn_service.c
885  */
886 struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
887 						     struct sk_buff *);
888 struct rxrpc_connection *rxrpc_prealloc_service_connection(gfp_t);
889 void rxrpc_new_incoming_connection(struct rxrpc_connection *, struct sk_buff *);
890 void rxrpc_unpublish_service_conn(struct rxrpc_connection *);
891 
892 /*
893  * input.c
894  */
895 void rxrpc_data_ready(struct sock *);
896 
897 /*
898  * insecure.c
899  */
900 extern const struct rxrpc_security rxrpc_no_security;
901 
902 /*
903  * key.c
904  */
905 extern struct key_type key_type_rxrpc;
906 extern struct key_type key_type_rxrpc_s;
907 
908 int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
909 int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
910 int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time_t,
911 			      u32);
912 
913 /*
914  * local_event.c
915  */
916 extern void rxrpc_process_local_events(struct rxrpc_local *);
917 
918 /*
919  * local_object.c
920  */
921 struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *);
922 void __rxrpc_put_local(struct rxrpc_local *);
923 void __exit rxrpc_destroy_all_locals(void);
924 
925 static inline void rxrpc_get_local(struct rxrpc_local *local)
926 {
927 	atomic_inc(&local->usage);
928 }
929 
930 static inline
931 struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
932 {
933 	return atomic_inc_not_zero(&local->usage) ? local : NULL;
934 }
935 
936 static inline void rxrpc_put_local(struct rxrpc_local *local)
937 {
938 	if (local && atomic_dec_and_test(&local->usage))
939 		__rxrpc_put_local(local);
940 }
941 
942 static inline void rxrpc_queue_local(struct rxrpc_local *local)
943 {
944 	rxrpc_queue_work(&local->processor);
945 }
946 
947 /*
948  * misc.c
949  */
950 extern unsigned int rxrpc_max_backlog __read_mostly;
951 extern unsigned int rxrpc_requested_ack_delay;
952 extern unsigned int rxrpc_soft_ack_delay;
953 extern unsigned int rxrpc_idle_ack_delay;
954 extern unsigned int rxrpc_rx_window_size;
955 extern unsigned int rxrpc_rx_mtu;
956 extern unsigned int rxrpc_rx_jumbo_max;
957 extern unsigned int rxrpc_resend_timeout;
958 
959 extern const s8 rxrpc_ack_priority[];
960 
961 /*
962  * output.c
963  */
964 int rxrpc_send_call_packet(struct rxrpc_call *, u8);
965 int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *);
966 void rxrpc_reject_packets(struct rxrpc_local *);
967 
968 /*
969  * peer_event.c
970  */
971 void rxrpc_error_report(struct sock *);
972 void rxrpc_peer_error_distributor(struct work_struct *);
973 void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
974 			rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
975 
976 /*
977  * peer_object.c
978  */
979 struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
980 					 const struct sockaddr_rxrpc *);
981 struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
982 				     struct sockaddr_rxrpc *, gfp_t);
983 struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
984 struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *,
985 					      struct rxrpc_peer *);
986 
987 static inline struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
988 {
989 	atomic_inc(&peer->usage);
990 	return peer;
991 }
992 
993 static inline
994 struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
995 {
996 	return atomic_inc_not_zero(&peer->usage) ? peer : NULL;
997 }
998 
999 extern void __rxrpc_put_peer(struct rxrpc_peer *peer);
1000 static inline void rxrpc_put_peer(struct rxrpc_peer *peer)
1001 {
1002 	if (peer && atomic_dec_and_test(&peer->usage))
1003 		__rxrpc_put_peer(peer);
1004 }
1005 
1006 /*
1007  * proc.c
1008  */
1009 extern const struct file_operations rxrpc_call_seq_fops;
1010 extern const struct file_operations rxrpc_connection_seq_fops;
1011 
1012 /*
1013  * recvmsg.c
1014  */
1015 void rxrpc_notify_socket(struct rxrpc_call *);
1016 int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);
1017 
1018 /*
1019  * rxkad.c
1020  */
1021 #ifdef CONFIG_RXKAD
1022 extern const struct rxrpc_security rxkad;
1023 #endif
1024 
1025 /*
1026  * security.c
1027  */
1028 int __init rxrpc_init_security(void);
1029 void rxrpc_exit_security(void);
1030 int rxrpc_init_client_conn_security(struct rxrpc_connection *);
1031 int rxrpc_init_server_conn_security(struct rxrpc_connection *);
1032 
1033 /*
1034  * sendmsg.c
1035  */
1036 int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);
1037 
1038 /*
1039  * skbuff.c
1040  */
1041 void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
1042 void rxrpc_packet_destructor(struct sk_buff *);
1043 void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
1044 void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
1045 void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
1046 void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
1047 void rxrpc_lose_skb(struct sk_buff *, enum rxrpc_skb_trace);
1048 void rxrpc_purge_queue(struct sk_buff_head *);
1049 
1050 /*
1051  * sysctl.c
1052  */
1053 #ifdef CONFIG_SYSCTL
1054 extern int __init rxrpc_sysctl_init(void);
1055 extern void rxrpc_sysctl_exit(void);
1056 #else
1057 static inline int __init rxrpc_sysctl_init(void) { return 0; }
1058 static inline void rxrpc_sysctl_exit(void) {}
1059 #endif
1060 
1061 /*
1062  * utils.c
1063  */
1064 int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);
1065 
1066 static inline bool before(u32 seq1, u32 seq2)
1067 {
1068         return (s32)(seq1 - seq2) < 0;
1069 }
1070 static inline bool before_eq(u32 seq1, u32 seq2)
1071 {
1072         return (s32)(seq1 - seq2) <= 0;
1073 }
1074 static inline bool after(u32 seq1, u32 seq2)
1075 {
1076         return (s32)(seq1 - seq2) > 0;
1077 }
1078 static inline bool after_eq(u32 seq1, u32 seq2)
1079 {
1080         return (s32)(seq1 - seq2) >= 0;
1081 }
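
/*
 * These helpers implement serial-number arithmetic: casting the difference to
 * s32 makes the comparisons wrap correctly, e.g. before(0xfffffffe, 1) is
 * true because (s32)(0xfffffffe - 1) is negative.  Illustrative use:
 *
 *	if (after(seq, call->rx_top))
 *		;	// packet lies beyond the slots allocated so far
 */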
1082 
1083 /*
1084  * debug tracing
1085  */
1086 extern unsigned int rxrpc_debug;
1087 
1088 #define dbgprintk(FMT,...) \
1089 	printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)
1090 
1091 #define kenter(FMT,...)	dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
1092 #define kleave(FMT,...)	dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
1093 #define kdebug(FMT,...)	dbgprintk("    "FMT ,##__VA_ARGS__)
1094 #define kproto(FMT,...)	dbgprintk("### "FMT ,##__VA_ARGS__)
1095 #define knet(FMT,...)	dbgprintk("@@@ "FMT ,##__VA_ARGS__)
1096 
1097 
1098 #if defined(__KDEBUG)
1099 #define _enter(FMT,...)	kenter(FMT,##__VA_ARGS__)
1100 #define _leave(FMT,...)	kleave(FMT,##__VA_ARGS__)
1101 #define _debug(FMT,...)	kdebug(FMT,##__VA_ARGS__)
1102 #define _proto(FMT,...)	kproto(FMT,##__VA_ARGS__)
1103 #define _net(FMT,...)	knet(FMT,##__VA_ARGS__)
1104 
1105 #elif defined(CONFIG_AF_RXRPC_DEBUG)
1106 #define RXRPC_DEBUG_KENTER	0x01
1107 #define RXRPC_DEBUG_KLEAVE	0x02
1108 #define RXRPC_DEBUG_KDEBUG	0x04
1109 #define RXRPC_DEBUG_KPROTO	0x08
1110 #define RXRPC_DEBUG_KNET	0x10
1111 
1112 #define _enter(FMT,...)					\
1113 do {							\
1114 	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER))	\
1115 		kenter(FMT,##__VA_ARGS__);		\
1116 } while (0)
1117 
1118 #define _leave(FMT,...)					\
1119 do {							\
1120 	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE))	\
1121 		kleave(FMT,##__VA_ARGS__);		\
1122 } while (0)
1123 
1124 #define _debug(FMT,...)					\
1125 do {							\
1126 	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG))	\
1127 		kdebug(FMT,##__VA_ARGS__);		\
1128 } while (0)
1129 
1130 #define _proto(FMT,...)					\
1131 do {							\
1132 	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KPROTO))	\
1133 		kproto(FMT,##__VA_ARGS__);		\
1134 } while (0)
1135 
1136 #define _net(FMT,...)					\
1137 do {							\
1138 	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KNET))	\
1139 		knet(FMT,##__VA_ARGS__);		\
1140 } while (0)
1141 
1142 #else
1143 #define _enter(FMT,...)	no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
1144 #define _leave(FMT,...)	no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
1145 #define _debug(FMT,...)	no_printk("    "FMT ,##__VA_ARGS__)
1146 #define _proto(FMT,...)	no_printk("### "FMT ,##__VA_ARGS__)
1147 #define _net(FMT,...)	no_printk("@@@ "FMT ,##__VA_ARGS__)
1148 #endif
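
/*
 * Typical use of the tracing macros above (output appears only when __KDEBUG
 * is defined or the matching rxrpc_debug bit is set):
 *
 *	_enter("%d,%x", call->debug_id, abort_code);
 *	...
 *	_leave(" = %d", ret);
 */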
1149 
1150 /*
1151  * debug assertion checking
1152  */
1153 #if 1 // defined(__KDEBUGALL)
1154 
1155 #define ASSERT(X)						\
1156 do {								\
1157 	if (unlikely(!(X))) {					\
1158 		pr_err("Assertion failed\n");			\
1159 		BUG();						\
1160 	}							\
1161 } while (0)
1162 
1163 #define ASSERTCMP(X, OP, Y)						\
1164 do {									\
1165 	__typeof__(X) _x = (X);						\
1166 	__typeof__(Y) _y = (__typeof__(X))(Y);				\
1167 	if (unlikely(!(_x OP _y))) {					\
1168 		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
1169 		       (unsigned long)_x, (unsigned long)_x, #OP,	\
1170 		       (unsigned long)_y, (unsigned long)_y);		\
1171 		BUG();							\
1172 	}								\
1173 } while (0)
1174 
1175 #define ASSERTIF(C, X)						\
1176 do {								\
1177 	if (unlikely((C) && !(X))) {				\
1178 		pr_err("Assertion failed\n");			\
1179 		BUG();						\
1180 	}							\
1181 } while (0)
1182 
1183 #define ASSERTIFCMP(C, X, OP, Y)					\
1184 do {									\
1185 	__typeof__(X) _x = (X);						\
1186 	__typeof__(Y) _y = (__typeof__(X))(Y);				\
1187 	if (unlikely((C) && !(_x OP _y))) {				\
1188 		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
1189 		       (unsigned long)_x, (unsigned long)_x, #OP,	\
1190 		       (unsigned long)_y, (unsigned long)_y);		\
1191 		BUG();							\
1192 	}								\
1193 } while (0)
1194 
1195 #else
1196 
1197 #define ASSERT(X)				\
1198 do {						\
1199 } while (0)
1200 
1201 #define ASSERTCMP(X, OP, Y)			\
1202 do {						\
1203 } while (0)
1204 
1205 #define ASSERTIF(C, X)				\
1206 do {						\
1207 } while (0)
1208 
1209 #define ASSERTIFCMP(C, X, OP, Y)		\
1210 do {						\
1211 } while (0)
1212 
1213 #endif /* __KDEBUGALL */
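
/*
 * Example use of the assertion macros above (illustrative):
 *
 *	ASSERT(call->conn != NULL);
 *	ASSERTCMP(atomic_read(&call->usage), >, 0);
 */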
1214