xref: /openbmc/linux/net/rds/rds.h (revision 7211ec63)
1 #ifndef _RDS_RDS_H
2 #define _RDS_RDS_H
3 
4 #include <net/sock.h>
5 #include <linux/scatterlist.h>
6 #include <linux/highmem.h>
7 #include <rdma/rdma_cm.h>
8 #include <linux/mutex.h>
9 #include <linux/rds.h>
10 #include <linux/rhashtable.h>
11 #include <linux/refcount.h>
12 
13 #include "info.h"
14 
15 /*
16  * RDS Network protocol version
17  */
18 #define RDS_PROTOCOL_3_0	0x0300
19 #define RDS_PROTOCOL_3_1	0x0301
20 #define RDS_PROTOCOL_VERSION	RDS_PROTOCOL_3_1
21 #define RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
22 #define RDS_PROTOCOL_MINOR(v)	((v) & 255)
23 #define RDS_PROTOCOL(maj, min)	(((maj) << 8) | (min))
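
/*
 * A quick worked example of the macros above (compile-time sanity checks;
 * this helper is purely illustrative and not used anywhere):
 * RDS_PROTOCOL(3, 1) packs to 0x0301, and the MAJOR/MINOR macros recover
 * 3 and 1 from it.
 */
static inline void rds_protocol_macros_example(void)
{
	BUILD_BUG_ON(RDS_PROTOCOL(3, 1) != RDS_PROTOCOL_3_1);
	BUILD_BUG_ON(RDS_PROTOCOL_MAJOR(RDS_PROTOCOL_VERSION) != 3);
	BUILD_BUG_ON(RDS_PROTOCOL_MINOR(RDS_PROTOCOL_VERSION) != 1);
}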
24 
25 /*
26  * XXX randomly chosen, but at least seems to be unused:
27  * #               18464-18768 Unassigned
28  * We should do better.  We want a reserved port to discourage unprivileged
29  * userspace from listening.
30  */
31 #define RDS_PORT	18634
32 
33 #ifdef ATOMIC64_INIT
34 #define KERNEL_HAS_ATOMIC64
35 #endif
36 
37 #ifdef RDS_DEBUG
38 #define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
39 #else
40 /* sigh, pr_debug() causes unused variable warnings */
41 static inline __printf(1, 2)
42 void rdsdebug(char *fmt, ...)
43 {
44 }
45 #endif
46 
47 /* XXX is there one of these somewhere? */
48 #define ceil(x, y) \
49 	({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })
50 
51 #define RDS_FRAG_SHIFT	12
52 #define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))
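
/*
 * A minimal sketch (illustrative helper, not used elsewhere) of how ceil()
 * and RDS_FRAG_SIZE combine: a payload is carried in whole fragments, so
 * e.g. 8193 bytes need ceil(8193, 4096) == 3 fragments.
 */
static inline unsigned long rds_frag_count_example(unsigned long payload_len)
{
	return ceil(payload_len, RDS_FRAG_SIZE);
}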
53 
54 /* Used to limit both RDMA and non-RDMA RDS messages to 1MB */
55 #define RDS_MAX_MSG_SIZE	((unsigned int)(1 << 20))
56 
57 #define RDS_CONG_MAP_BYTES	(65536 / 8)
58 #define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
59 #define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)
60 
61 struct rds_cong_map {
62 	struct rb_node		m_rb_node;
63 	__be32			m_addr;
64 	wait_queue_head_t	m_waitq;
65 	struct list_head	m_conn_list;
66 	unsigned long		m_page_addrs[RDS_CONG_MAP_PAGES];
67 };
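
/*
 * The map above is one bit per 16-bit port (65536 bits) split across
 * RDS_CONG_MAP_PAGES pages.  A minimal sketch of the page/bit arithmetic
 * (the real set/clear/test helpers live in cong.c; these names are
 * illustrative only):
 */
static inline unsigned long rds_cong_map_page_example(__be16 port)
{
	return be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
}

static inline unsigned long rds_cong_map_bit_example(__be16 port)
{
	return be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
}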
68 
69 
70 /*
71  * This is how we will track the connection state:
72  * A connection is always in one of the following
73  * states. Updates to the state are atomic and imply
74  * a memory barrier.
75  */
76 enum {
77 	RDS_CONN_DOWN = 0,
78 	RDS_CONN_CONNECTING,
79 	RDS_CONN_DISCONNECTING,
80 	RDS_CONN_UP,
81 	RDS_CONN_RESETTING,
82 	RDS_CONN_ERROR,
83 };
84 
85 /* Bits for c_flags */
86 #define RDS_LL_SEND_FULL	0
87 #define RDS_RECONNECT_PENDING	1
88 #define RDS_IN_XMIT		2
89 #define RDS_RECV_REFILL		3
90 
91 /* Max number of multipaths per RDS connection. Must be a power of 2 */
92 #define	RDS_MPATH_WORKERS	8
93 #define	RDS_MPATH_HASH(rs, n) (jhash_1word((rs)->rs_bound_port, \
94 			       (rs)->rs_hash_initval) & ((n) - 1))
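
/*
 * Usage sketch (illustrative): the send path picks a path for a socket with
 *
 *	cp = &conn->c_path[RDS_MPATH_HASH(rs, conn->c_npaths)];
 *
 * so a given bound port consistently hashes to the same path; the power-of-2
 * requirement above is what makes the "& ((n) - 1)" mask valid.
 */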
95 
96 #define IS_CANONICAL(laddr, faddr) (htonl(laddr) < htonl(faddr))
97 
98 /* Per mpath connection state */
99 struct rds_conn_path {
100 	struct rds_connection	*cp_conn;
101 	struct rds_message	*cp_xmit_rm;
102 	unsigned long		cp_xmit_sg;
103 	unsigned int		cp_xmit_hdr_off;
104 	unsigned int		cp_xmit_data_off;
105 	unsigned int		cp_xmit_atomic_sent;
106 	unsigned int		cp_xmit_rdma_sent;
107 	unsigned int		cp_xmit_data_sent;
108 
109 	spinlock_t		cp_lock;		/* protect msg queues */
110 	u64			cp_next_tx_seq;
111 	struct list_head	cp_send_queue;
112 	struct list_head	cp_retrans;
113 
114 	u64			cp_next_rx_seq;
115 
116 	void			*cp_transport_data;
117 
118 	atomic_t		cp_state;
119 	unsigned long		cp_send_gen;
120 	unsigned long		cp_flags;
121 	unsigned long		cp_reconnect_jiffies;
122 	struct delayed_work	cp_send_w;
123 	struct delayed_work	cp_recv_w;
124 	struct delayed_work	cp_conn_w;
125 	struct work_struct	cp_down_w;
126 	struct mutex		cp_cm_lock;	/* protect cp_state & cm */
127 	wait_queue_head_t	cp_waitq;
128 
129 	unsigned int		cp_unacked_packets;
130 	unsigned int		cp_unacked_bytes;
131 	unsigned int		cp_index;
132 };
133 
134 /* One rds_connection per RDS address pair */
135 struct rds_connection {
136 	struct hlist_node	c_hash_node;
137 	__be32			c_laddr;
138 	__be32			c_faddr;
139 	unsigned int		c_loopback:1,
140 				c_ping_triggered:1,
141 				c_destroy_in_prog:1,
142 				c_pad_to_32:29;
143 	int			c_npaths;
144 	struct rds_connection	*c_passive;
145 	struct rds_transport	*c_trans;
146 
147 	struct rds_cong_map	*c_lcong;
148 	struct rds_cong_map	*c_fcong;
149 
150 	/* Protocol version */
151 	unsigned int		c_version;
152 	struct net		*c_net;
153 
154 	struct list_head	c_map_item;
155 	unsigned long		c_map_queued;
156 
157 	struct rds_conn_path	*c_path;
158 	wait_queue_head_t	c_hs_waitq; /* handshake waitq */
159 
160 	u32			c_my_gen_num;
161 	u32			c_peer_gen_num;
162 };
163 
164 static inline
165 struct net *rds_conn_net(struct rds_connection *conn)
166 {
167 	return conn->c_net;
168 }
169 
170 static inline
171 void rds_conn_net_set(struct rds_connection *conn, struct net *net)
172 {
173 	conn->c_net = get_net(net);
174 }
175 
176 #define RDS_FLAG_CONG_BITMAP	0x01
177 #define RDS_FLAG_ACK_REQUIRED	0x02
178 #define RDS_FLAG_RETRANSMITTED	0x04
179 #define RDS_MAX_ADV_CREDIT	255
180 
181 /* RDS_FLAG_PROBE_PORT is the reserved sport used for sending a ping
182  * probe to exchange control information before establishing a connection.
183  * Currently the control information that is exchanged is the number of
184  * supported paths. If the peer is a legacy (older kernel revision) peer,
185  * it will return a pong message without the additional control information,
186  * which tells the sender that the peer is running an older revision.
187  */
188 #define RDS_FLAG_PROBE_PORT	1
189 #define	RDS_HS_PROBE(sport, dport) \
190 		(((sport) == RDS_FLAG_PROBE_PORT && (dport) == 0) || \
191 		 ((sport) == 0 && (dport) == RDS_FLAG_PROBE_PORT))
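
/*
 * A minimal sketch (illustrative helper): a handshake probe is identified by
 * the reserved port pairing above, checked on ports already converted to CPU
 * order.
 */
static inline bool rds_hs_probe_example(u16 sport, u16 dport)
{
	return RDS_HS_PROBE(sport, dport);
}
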
192 /*
193  * Maximum space available for extension headers.
194  */
195 #define RDS_HEADER_EXT_SPACE	16
196 
197 struct rds_header {
198 	__be64	h_sequence;
199 	__be64	h_ack;
200 	__be32	h_len;
201 	__be16	h_sport;
202 	__be16	h_dport;
203 	u8	h_flags;
204 	u8	h_credit;
205 	u8	h_padding[4];
206 	__sum16	h_csum;
207 
208 	u8	h_exthdr[RDS_HEADER_EXT_SPACE];
209 };
210 
211 /*
212  * Reserved - indicates end of extensions
213  */
214 #define RDS_EXTHDR_NONE		0
215 
216 /*
217  * This extension header is included in the very
218  * first message that is sent on a new connection,
219  * and identifies the protocol level. This will help
220  * rolling updates if a future change requires breaking
221  * the protocol.
222  * NB: This is no longer true for IB, where we do a version
223  * negotiation during the connection setup phase (protocol
224  * version information is included in the RDMA CM private data).
225  */
226 #define RDS_EXTHDR_VERSION	1
227 struct rds_ext_header_version {
228 	__be32			h_version;
229 };
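
/*
 * Usage sketch (illustrative): the sender would attach this extension to the
 * header of the first message roughly as
 *
 *	struct rds_ext_header_version ext = {
 *		.h_version = cpu_to_be32(RDS_PROTOCOL_VERSION),
 *	};
 *
 *	rds_message_add_extension(&rm->m_inc.i_hdr, RDS_EXTHDR_VERSION,
 *				  &ext, sizeof(ext));
 *
 * rds_message_add_extension() is declared later in this header.
 */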
230 
231 /*
232  * This extension header is included in the RDS message
233  * chasing an RDMA operation.
234  */
235 #define RDS_EXTHDR_RDMA		2
236 struct rds_ext_header_rdma {
237 	__be32			h_rdma_rkey;
238 };
239 
240 /*
241  * This extension header tells the peer about the
242  * destination <R_Key,offset> of the requested RDMA
243  * operation.
244  */
245 #define RDS_EXTHDR_RDMA_DEST	3
246 struct rds_ext_header_rdma_dest {
247 	__be32			h_rdma_rkey;
248 	__be32			h_rdma_offset;
249 };
250 
251 /* Extension header announcing number of paths.
252  * Implicit length = 2 bytes.
253  */
254 #define RDS_EXTHDR_NPATHS	5
255 #define RDS_EXTHDR_GEN_NUM	6
256 
257 #define __RDS_EXTHDR_MAX	16 /* for now */
258 #define RDS_RX_MAX_TRACES	(RDS_MSG_RX_DGRAM_TRACE_MAX + 1)
259 #define	RDS_MSG_RX_HDR		0
260 #define	RDS_MSG_RX_START	1
261 #define	RDS_MSG_RX_END		2
262 #define	RDS_MSG_RX_CMSG		3
263 
264 struct rds_incoming {
265 	refcount_t		i_refcount;
266 	struct list_head	i_item;
267 	struct rds_connection	*i_conn;
268 	struct rds_conn_path	*i_conn_path;
269 	struct rds_header	i_hdr;
270 	unsigned long		i_rx_jiffies;
271 	__be32			i_saddr;
272 
273 	rds_rdma_cookie_t	i_rdma_cookie;
274 	struct timeval		i_rx_tstamp;
275 	u64			i_rx_lat_trace[RDS_RX_MAX_TRACES];
276 };
277 
278 struct rds_mr {
279 	struct rb_node		r_rb_node;
280 	refcount_t		r_refcount;
281 	u32			r_key;
282 
283 	/* A copy of the creation flags */
284 	unsigned int		r_use_once:1;
285 	unsigned int		r_invalidate:1;
286 	unsigned int		r_write:1;
287 
288 	/* This is for RDS_MR_DEAD.
289 	 * It would be nice & consistent to make this part of the above
290 	 * bit field here, but we need to use test_and_set_bit.
291 	 */
292 	unsigned long		r_state;
293 	struct rds_sock		*r_sock; /* back pointer to the socket that owns us */
294 	struct rds_transport	*r_trans;
295 	void			*r_trans_private;
296 };
297 
298 /* Flags for mr->r_state */
299 #define RDS_MR_DEAD		0
300 
301 static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
302 {
303 	return r_key | (((u64) offset) << 32);
304 }
305 
306 static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
307 {
308 	return cookie;
309 }
310 
311 static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
312 {
313 	return cookie >> 32;
314 }
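
/*
 * A minimal sketch (illustrative helper) of the cookie layout implied by the
 * functions above: the R_Key lives in the low 32 bits and the offset in the
 * high 32 bits, so packing and unpacking round-trips.
 */
static inline bool rds_rdma_cookie_roundtrip_example(u32 r_key, u32 offset)
{
	rds_rdma_cookie_t cookie = rds_rdma_make_cookie(r_key, offset);

	return rds_rdma_cookie_key(cookie) == r_key &&
	       rds_rdma_cookie_offset(cookie) == offset;
}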
315 
316 /* atomic operation types */
317 #define RDS_ATOMIC_TYPE_CSWP		0
318 #define RDS_ATOMIC_TYPE_FADD		1
319 
320 /*
321  * m_sock_item and m_conn_item are on lists that are serialized under
322  * conn->c_lock.  m_sock_item has additional meaning in that once it is empty
323  * the message will not be put back on the retransmit list after being sent.
324  * Messages that are canceled while being sent rely on this.
325  *
326  * m_inc is used by loopback so that it can pass an incoming message straight
327  * back up into the rx path.  It embeds a wire header which is also used by
328  * the send path, which is kind of awkward.
329  *
330  * m_sock_item indicates the message's presence on a socket's send or receive
331  * queue.  m_rs will point to that socket.
332  *
333  * m_daddr is used by cancellation to prune messages to a given destination.
334  *
335  * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
336  * nesting.  As paths iterate over messages on a sock, or conn, they must
337  * also lock the conn, or sock, to remove the message from those lists too.
338  * Testing the flag to determine if the message is still on the lists lets
339  * us avoid testing the list_head directly.  That means each path can use
340  * the message's list_head to keep it on a local list while juggling locks
341  * without confusing the other path.
342  *
343  * m_ack_seq is an optional field set by transports who need a different
344  * sequence number range to invalidate.  They can use this in a callback
345  * that they pass to rds_send_drop_acked() to see if each message has been
346  * acked.  The HAS_ACK_SEQ flag can be used to detect messages which haven't
347  * had ack_seq set yet.
348  */
349 #define RDS_MSG_ON_SOCK		1
350 #define RDS_MSG_ON_CONN		2
351 #define RDS_MSG_HAS_ACK_SEQ	3
352 #define RDS_MSG_ACK_REQUIRED	4
353 #define RDS_MSG_RETRANSMITTED	5
354 #define RDS_MSG_MAPPED		6
355 #define RDS_MSG_PAGEVEC		7
356 #define RDS_MSG_FLUSH		8
357 
358 struct rds_message {
359 	refcount_t		m_refcount;
360 	struct list_head	m_sock_item;
361 	struct list_head	m_conn_item;
362 	struct rds_incoming	m_inc;
363 	u64			m_ack_seq;
364 	__be32			m_daddr;
365 	unsigned long		m_flags;
366 
367 	/* Never access m_rs without holding m_rs_lock.
368 	 * Lock nesting is
369 	 *  rm->m_rs_lock
370 	 *   -> rs->rs_lock
371 	 */
372 	spinlock_t		m_rs_lock;
373 	wait_queue_head_t	m_flush_wait;
374 
375 	struct rds_sock		*m_rs;
376 
377 	/* cookie to send to remote, in rds header */
378 	rds_rdma_cookie_t	m_rdma_cookie;
379 
380 	unsigned int		m_used_sgs;
381 	unsigned int		m_total_sgs;
382 
383 	void			*m_final_op;
384 
385 	struct {
386 		struct rm_atomic_op {
387 			int			op_type;
388 			union {
389 				struct {
390 					uint64_t	compare;
391 					uint64_t	swap;
392 					uint64_t	compare_mask;
393 					uint64_t	swap_mask;
394 				} op_m_cswp;
395 				struct {
396 					uint64_t	add;
397 					uint64_t	nocarry_mask;
398 				} op_m_fadd;
399 			};
400 
401 			u32			op_rkey;
402 			u64			op_remote_addr;
403 			unsigned int		op_notify:1;
404 			unsigned int		op_recverr:1;
405 			unsigned int		op_mapped:1;
406 			unsigned int		op_silent:1;
407 			unsigned int		op_active:1;
408 			struct scatterlist	*op_sg;
409 			struct rds_notifier	*op_notifier;
410 
411 			struct rds_mr		*op_rdma_mr;
412 		} atomic;
413 		struct rm_rdma_op {
414 			u32			op_rkey;
415 			u64			op_remote_addr;
416 			unsigned int		op_write:1;
417 			unsigned int		op_fence:1;
418 			unsigned int		op_notify:1;
419 			unsigned int		op_recverr:1;
420 			unsigned int		op_mapped:1;
421 			unsigned int		op_silent:1;
422 			unsigned int		op_active:1;
423 			unsigned int		op_bytes;
424 			unsigned int		op_nents;
425 			unsigned int		op_count;
426 			struct scatterlist	*op_sg;
427 			struct rds_notifier	*op_notifier;
428 
429 			struct rds_mr		*op_rdma_mr;
430 		} rdma;
431 		struct rm_data_op {
432 			unsigned int		op_active:1;
433 			unsigned int		op_notify:1;
434 			unsigned int		op_nents;
435 			unsigned int		op_count;
436 			unsigned int		op_dmasg;
437 			unsigned int		op_dmaoff;
438 			struct scatterlist	*op_sg;
439 		} data;
440 	};
441 };
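
/*
 * A minimal sketch (illustrative helper) of the flag test described in the
 * block comment above RDS_MSG_ON_SOCK: paths test the flag rather than the
 * list_head to learn whether a message is still on its socket's queue.
 */
static inline bool rds_message_on_sock_example(struct rds_message *rm)
{
	return test_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
}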
442 
443 /*
444  * The RDS notifier is used (optionally) to tell the application about
445  * completed RDMA operations. Rather than keeping the whole rds message
446  * around on the queue, we allocate a small notifier that is put on the
447  * socket's notifier_list. Notifications are delivered to the application
448  * through control messages.
449  */
450 struct rds_notifier {
451 	struct list_head	n_list;
452 	uint64_t		n_user_token;
453 	int			n_status;
454 };
455 
456 /**
457  * struct rds_transport -  transport specific behavioural hooks
458  *
459  * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
460  *        part of a message.  The caller serializes on the send_sem so this
461  *        doesn't need to be reentrant for a given conn.  The header must be
462  *        sent before the data payload.  .xmit must be prepared to send a
463  *        message with no data payload.  .xmit should return the number of
464  *        bytes that were sent down the connection, including header bytes.
465  *        Returning 0 tells the caller that it doesn't need to perform any
466  *        additional work now.  This is usually the case when the transport has
467  *        filled the sending queue for its connection and will handle
468  *        triggering the rds thread to continue the send when space becomes
469  *        available.  Returning -EAGAIN tells the caller to retry the send
470  *        immediately.  Returning -ENOMEM tells the caller to retry the send at
471  *        some point in the future.
472  *
473  * @conn_shutdown: conn_shutdown stops traffic on the given connection.  Once
474  *                 it returns the connection can not call rds_recv_incoming().
475  *                 This will only be called once after conn_connect returns
476  *                 non-zero success.  The caller serializes this with
477  *                 the send and connecting paths (xmit_* and conn_*).  The
478  *                 transport is responsible for other serialization, including
479  *                 rds_recv_incoming().  This is called in process context but
480  *                 should try hard not to block.
481  */
482 
483 struct rds_transport {
484 	char			t_name[TRANSNAMSIZ];
485 	struct list_head	t_item;
486 	struct module		*t_owner;
487 	unsigned int		t_prefer_loopback:1,
488 				t_mp_capable:1;
489 	unsigned int		t_type;
490 
491 	int (*laddr_check)(struct net *net, __be32 addr);
492 	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
493 	void (*conn_free)(void *data);
494 	int (*conn_path_connect)(struct rds_conn_path *cp);
495 	void (*conn_path_shutdown)(struct rds_conn_path *conn);
496 	void (*xmit_path_prepare)(struct rds_conn_path *cp);
497 	void (*xmit_path_complete)(struct rds_conn_path *cp);
498 	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
499 		    unsigned int hdr_off, unsigned int sg, unsigned int off);
500 	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
501 	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
502 	int (*recv_path)(struct rds_conn_path *cp);
503 	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
504 	void (*inc_free)(struct rds_incoming *inc);
505 
506 	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
507 				 struct rdma_cm_event *event);
508 	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id);
509 	void (*cm_connect_complete)(struct rds_connection *conn,
510 				    struct rdma_cm_event *event);
511 
512 	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
513 					unsigned int avail);
514 	void (*exit)(void);
515 	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
516 			struct rds_sock *rs, u32 *key_ret);
517 	void (*sync_mr)(void *trans_private, int direction);
518 	void (*free_mr)(void *trans_private, int invalidate);
519 	void (*flush_mrs)(void);
520 };
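
/*
 * A minimal sketch (illustrative helper) of how the .xmit hook is invoked by
 * the send path, per the comment above: the header offset, scatterlist index
 * and data offset select where in the message to resume, and the return value
 * is bytes sent, 0, -EAGAIN or -ENOMEM.  The real loop lives in
 * rds_send_xmit().
 */
static inline int rds_transport_xmit_example(struct rds_connection *conn,
					     struct rds_message *rm)
{
	/* start from the beginning of the message */
	return conn->c_trans->xmit(conn, rm, 0, 0, 0);
}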
521 
522 struct rds_sock {
523 	struct sock		rs_sk;
524 
525 	u64			rs_user_addr;
526 	u64			rs_user_bytes;
527 
528 	/*
529 	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
530 	 * support.
531 	 */
532 	struct rhash_head	rs_bound_node;
533 	u64			rs_bound_key;
534 	__be32			rs_bound_addr;
535 	__be32			rs_conn_addr;
536 	__be16			rs_bound_port;
537 	__be16			rs_conn_port;
538 	struct rds_transport    *rs_transport;
539 
540 	/*
541 	 * rds_sendmsg caches the conn it used the last time around.
542 	 * This helps avoid costly lookups.
543 	 */
544 	struct rds_connection	*rs_conn;
545 
546 	/* flag indicating we were congested or not */
547 	int			rs_congested;
548 	/* seen congestion (ENOBUFS) when sending? */
549 	int			rs_seen_congestion;
550 
551 	/* rs_lock protects the adjacent members below, up to the blank line */
552 	spinlock_t		rs_lock;
553 	struct list_head	rs_send_queue;
554 	u32			rs_snd_bytes;
555 	int			rs_rcv_bytes;
556 	struct list_head	rs_notify_queue;	/* currently used for failed RDMAs */
557 
558 	/* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
559 	 * to decide whether the application should be woken up.
560 	 * If not set, we use rs_cong_track to find out whether a cong map
561 	 * update arrived.
562 	 */
563 	uint64_t		rs_cong_mask;
564 	uint64_t		rs_cong_notify;
565 	struct list_head	rs_cong_list;
566 	unsigned long		rs_cong_track;
567 
568 	/*
569 	 * rs_recv_lock protects the receive queue, and is
570 	 * used to serialize with rds_release.
571 	 */
572 	rwlock_t		rs_recv_lock;
573 	struct list_head	rs_recv_queue;
574 
575 	/* just for stats reporting */
576 	struct list_head	rs_item;
577 
578 	/* these have their own lock */
579 	spinlock_t		rs_rdma_lock;
580 	struct rb_root		rs_rdma_keys;
581 
582 	/* Socket options - in case there will be more */
583 	unsigned char		rs_recverr,
584 				rs_cong_monitor;
585 	u32			rs_hash_initval;
586 
587 	/* Socket receive path trace points */
588 	u8			rs_rx_traces;
589 	u8			rs_rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX];
590 };
591 
592 static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
593 {
594 	return container_of(sk, struct rds_sock, rs_sk);
595 }
596 static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
597 {
598 	return &rs->rs_sk;
599 }
600 
601 /*
602  * The stack assigns sk_sndbuf and sk_rcvbuf to twice the specified value
603  * to account for overhead.  We don't account for overhead; we just count
604  * payload bytes against the specified value.
605  */
606 static inline int rds_sk_sndbuf(struct rds_sock *rs)
607 {
608 	return rds_rs_to_sk(rs)->sk_sndbuf / 2;
609 }
610 static inline int rds_sk_rcvbuf(struct rds_sock *rs)
611 {
612 	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
613 }
614 
615 struct rds_statistics {
616 	uint64_t	s_conn_reset;
617 	uint64_t	s_recv_drop_bad_checksum;
618 	uint64_t	s_recv_drop_old_seq;
619 	uint64_t	s_recv_drop_no_sock;
620 	uint64_t	s_recv_drop_dead_sock;
621 	uint64_t	s_recv_deliver_raced;
622 	uint64_t	s_recv_delivered;
623 	uint64_t	s_recv_queued;
624 	uint64_t	s_recv_immediate_retry;
625 	uint64_t	s_recv_delayed_retry;
626 	uint64_t	s_recv_ack_required;
627 	uint64_t	s_recv_rdma_bytes;
628 	uint64_t	s_recv_ping;
629 	uint64_t	s_send_queue_empty;
630 	uint64_t	s_send_queue_full;
631 	uint64_t	s_send_lock_contention;
632 	uint64_t	s_send_lock_queue_raced;
633 	uint64_t	s_send_immediate_retry;
634 	uint64_t	s_send_delayed_retry;
635 	uint64_t	s_send_drop_acked;
636 	uint64_t	s_send_ack_required;
637 	uint64_t	s_send_queued;
638 	uint64_t	s_send_rdma;
639 	uint64_t	s_send_rdma_bytes;
640 	uint64_t	s_send_pong;
641 	uint64_t	s_page_remainder_hit;
642 	uint64_t	s_page_remainder_miss;
643 	uint64_t	s_copy_to_user;
644 	uint64_t	s_copy_from_user;
645 	uint64_t	s_cong_update_queued;
646 	uint64_t	s_cong_update_received;
647 	uint64_t	s_cong_send_error;
648 	uint64_t	s_cong_send_blocked;
649 	uint64_t	s_recv_bytes_added_to_socket;
650 	uint64_t	s_recv_bytes_removed_from_socket;
651 
652 };
653 
654 /* af_rds.c */
655 void rds_sock_addref(struct rds_sock *rs);
656 void rds_sock_put(struct rds_sock *rs);
657 void rds_wake_sk_sleep(struct rds_sock *rs);
658 static inline void __rds_wake_sk_sleep(struct sock *sk)
659 {
660 	wait_queue_head_t *waitq = sk_sleep(sk);
661 
662 	if (!sock_flag(sk, SOCK_DEAD) && waitq)
663 		wake_up(waitq);
664 }
665 extern wait_queue_head_t rds_poll_waitq;
666 
667 
668 /* bind.c */
669 int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
670 void rds_remove_bound(struct rds_sock *rs);
671 struct rds_sock *rds_find_bound(__be32 addr, __be16 port);
672 int rds_bind_lock_init(void);
673 void rds_bind_lock_destroy(void);
674 
675 /* cong.c */
676 int rds_cong_get_maps(struct rds_connection *conn);
677 void rds_cong_add_conn(struct rds_connection *conn);
678 void rds_cong_remove_conn(struct rds_connection *conn);
679 void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
680 void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
681 int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs);
682 void rds_cong_queue_updates(struct rds_cong_map *map);
683 void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
684 int rds_cong_updated_since(unsigned long *recent);
685 void rds_cong_add_socket(struct rds_sock *);
686 void rds_cong_remove_socket(struct rds_sock *);
687 void rds_cong_exit(void);
688 struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);
689 
690 /* conn.c */
691 extern u32 rds_gen_num;
692 int rds_conn_init(void);
693 void rds_conn_exit(void);
694 struct rds_connection *rds_conn_create(struct net *net,
695 				       __be32 laddr, __be32 faddr,
696 				       struct rds_transport *trans, gfp_t gfp);
697 struct rds_connection *rds_conn_create_outgoing(struct net *net,
698 						__be32 laddr, __be32 faddr,
699 			       struct rds_transport *trans, gfp_t gfp);
700 void rds_conn_shutdown(struct rds_conn_path *cpath);
701 void rds_conn_destroy(struct rds_connection *conn);
702 void rds_conn_drop(struct rds_connection *conn);
703 void rds_conn_path_drop(struct rds_conn_path *cpath, bool destroy);
704 void rds_conn_connect_if_down(struct rds_connection *conn);
705 void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
706 void rds_for_each_conn_info(struct socket *sock, unsigned int len,
707 			  struct rds_info_iterator *iter,
708 			  struct rds_info_lengths *lens,
709 			  int (*visitor)(struct rds_connection *, void *),
710 			  size_t item_len);
711 
712 __printf(2, 3)
713 void __rds_conn_path_error(struct rds_conn_path *cp, const char *, ...);
714 #define rds_conn_path_error(cp, fmt...) \
715 	__rds_conn_path_error(cp, KERN_WARNING "RDS: " fmt)
716 
717 static inline int
718 rds_conn_path_transition(struct rds_conn_path *cp, int old, int new)
719 {
720 	return atomic_cmpxchg(&cp->cp_state, old, new) == old;
721 }
722 
723 static inline int
724 rds_conn_transition(struct rds_connection *conn, int old, int new)
725 {
726 	WARN_ON(conn->c_trans->t_mp_capable);
727 	return rds_conn_path_transition(&conn->c_path[0], old, new);
728 }
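
/*
 * A minimal usage sketch (illustrative helper): a connect attempt claims a
 * path by moving it DOWN -> CONNECTING atomically; losing that race means
 * another context is already bringing the path up.
 */
static inline int rds_conn_path_try_connect_example(struct rds_conn_path *cp)
{
	return rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING);
}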
729 
730 static inline int
731 rds_conn_path_state(struct rds_conn_path *cp)
732 {
733 	return atomic_read(&cp->cp_state);
734 }
735 
736 static inline int
737 rds_conn_state(struct rds_connection *conn)
738 {
739 	WARN_ON(conn->c_trans->t_mp_capable);
740 	return rds_conn_path_state(&conn->c_path[0]);
741 }
742 
743 static inline int
744 rds_conn_path_up(struct rds_conn_path *cp)
745 {
746 	return atomic_read(&cp->cp_state) == RDS_CONN_UP;
747 }
748 
749 static inline int
750 rds_conn_up(struct rds_connection *conn)
751 {
752 	WARN_ON(conn->c_trans->t_mp_capable);
753 	return rds_conn_path_up(&conn->c_path[0]);
754 }
755 
756 static inline int
757 rds_conn_path_connecting(struct rds_conn_path *cp)
758 {
759 	return atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING;
760 }
761 
762 static inline int
763 rds_conn_connecting(struct rds_connection *conn)
764 {
765 	WARN_ON(conn->c_trans->t_mp_capable);
766 	return rds_conn_path_connecting(&conn->c_path[0]);
767 }
768 
769 /* message.c */
770 struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
771 struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
772 int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from);
773 struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
774 void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
775 				 __be16 dport, u64 seq);
776 int rds_message_add_extension(struct rds_header *hdr,
777 			      unsigned int type, const void *data, unsigned int len);
778 int rds_message_next_extension(struct rds_header *hdr,
779 			       unsigned int *pos, void *buf, unsigned int *buflen);
780 int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
781 int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
782 void rds_message_inc_free(struct rds_incoming *inc);
783 void rds_message_addref(struct rds_message *rm);
784 void rds_message_put(struct rds_message *rm);
785 void rds_message_wait(struct rds_message *rm);
786 void rds_message_unmapped(struct rds_message *rm);
787 
788 static inline void rds_message_make_checksum(struct rds_header *hdr)
789 {
790 	hdr->h_csum = 0;
791 	hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
792 }
793 
794 static inline int rds_message_verify_checksum(const struct rds_header *hdr)
795 {
796 	return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
797 }
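
/*
 * A minimal sketch (illustrative helper): the checksum is computed with
 * h_csum zeroed and then stored in place, so a freshly checksummed header
 * always verifies.
 */
static inline int rds_header_csum_selftest_example(struct rds_header *hdr)
{
	rds_message_make_checksum(hdr);
	return rds_message_verify_checksum(hdr);
}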
798 
799 
800 /* page.c */
801 int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
802 			     gfp_t gfp);
803 void rds_page_exit(void);
804 
805 /* recv.c */
806 void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
807 		  __be32 saddr);
808 void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *conn,
809 		       __be32 saddr);
810 void rds_inc_put(struct rds_incoming *inc);
811 void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
812 		       struct rds_incoming *inc, gfp_t gfp);
813 int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
814 		int msg_flags);
815 void rds_clear_recv_queue(struct rds_sock *rs);
816 int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
817 void rds_inc_info_copy(struct rds_incoming *inc,
818 		       struct rds_info_iterator *iter,
819 		       __be32 saddr, __be32 daddr, int flip);
820 
821 /* send.c */
822 int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
823 void rds_send_path_reset(struct rds_conn_path *conn);
824 int rds_send_xmit(struct rds_conn_path *cp);
825 struct sockaddr_in;
826 void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
827 typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
828 void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
829 			 is_acked_func is_acked);
830 void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
831 			      is_acked_func is_acked);
832 void rds_send_ping(struct rds_connection *conn, int cp_index);
833 int rds_send_pong(struct rds_conn_path *cp, __be16 dport);
834 
835 /* rdma.c */
836 void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
837 int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
838 int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
839 int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
840 void rds_rdma_drop_keys(struct rds_sock *rs);
841 int rds_rdma_extra_size(struct rds_rdma_args *args);
842 int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
843 			  struct cmsghdr *cmsg);
844 int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
845 			  struct cmsghdr *cmsg);
848 int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
849 			  struct cmsghdr *cmsg);
850 void rds_rdma_free_op(struct rm_rdma_op *ro);
851 void rds_atomic_free_op(struct rm_atomic_op *ao);
852 void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
853 void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
854 int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
855 		    struct cmsghdr *cmsg);
856 
857 void __rds_put_mr_final(struct rds_mr *mr);
858 static inline void rds_mr_put(struct rds_mr *mr)
859 {
860 	if (refcount_dec_and_test(&mr->r_refcount))
861 		__rds_put_mr_final(mr);
862 }
863 
864 /* stats.c */
865 DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
866 #define rds_stats_inc_which(which, member) do {		\
867 	per_cpu(which, get_cpu()).member++;		\
868 	put_cpu();					\
869 } while (0)
870 #define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
871 #define rds_stats_add_which(which, member, count) do {		\
872 	per_cpu(which, get_cpu()).member += count;	\
873 	put_cpu();					\
874 } while (0)
875 #define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
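
/*
 * Usage sketch (illustrative helper): counters are per-CPU; the macros above
 * pin the caller to the local CPU for the duration of the update.
 */
static inline void rds_stats_example(unsigned int copied)
{
	rds_stats_inc(s_recv_ping);
	rds_stats_add(s_copy_to_user, copied);
}
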
876 int rds_stats_init(void);
877 void rds_stats_exit(void);
878 void rds_stats_info_copy(struct rds_info_iterator *iter,
879 			 uint64_t *values, const char *const *names,
880 			 size_t nr);
881 
882 /* sysctl.c */
883 int rds_sysctl_init(void);
884 void rds_sysctl_exit(void);
885 extern unsigned long rds_sysctl_sndbuf_min;
886 extern unsigned long rds_sysctl_sndbuf_default;
887 extern unsigned long rds_sysctl_sndbuf_max;
888 extern unsigned long rds_sysctl_reconnect_min_jiffies;
889 extern unsigned long rds_sysctl_reconnect_max_jiffies;
890 extern unsigned int  rds_sysctl_max_unacked_packets;
891 extern unsigned int  rds_sysctl_max_unacked_bytes;
892 extern unsigned int  rds_sysctl_ping_enable;
893 extern unsigned long rds_sysctl_trace_flags;
894 extern unsigned int  rds_sysctl_trace_level;
895 
896 /* threads.c */
897 int rds_threads_init(void);
898 void rds_threads_exit(void);
899 extern struct workqueue_struct *rds_wq;
900 void rds_queue_reconnect(struct rds_conn_path *cp);
901 void rds_connect_worker(struct work_struct *);
902 void rds_shutdown_worker(struct work_struct *);
903 void rds_send_worker(struct work_struct *);
904 void rds_recv_worker(struct work_struct *);
905 void rds_connect_path_complete(struct rds_conn_path *conn, int curr);
906 void rds_connect_complete(struct rds_connection *conn);
907 
908 /* transport.c */
909 void rds_trans_register(struct rds_transport *trans);
910 void rds_trans_unregister(struct rds_transport *trans);
911 struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr);
912 void rds_trans_put(struct rds_transport *trans);
913 unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
914 				       unsigned int avail);
915 struct rds_transport *rds_trans_get(int t_type);
916 int rds_trans_init(void);
917 void rds_trans_exit(void);
918 
919 #endif
920