/* xref: /openbmc/linux/net/rds/rds.h (revision d774a589) */
#ifndef _RDS_RDS_H
#define _RDS_RDS_H

#include <net/sock.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <rdma/rdma_cm.h>
#include <linux/mutex.h>
#include <linux/rds.h>
#include <linux/rhashtable.h>

#include "info.h"
/*
 * RDS Network protocol version
 */
#define RDS_PROTOCOL_3_0	0x0300
#define RDS_PROTOCOL_3_1	0x0301
#define RDS_PROTOCOL_VERSION	RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
#define RDS_PROTOCOL_MINOR(v)	((v) & 255)
#define RDS_PROTOCOL(maj, min)	(((maj) << 8) | (min))
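
/*
 * Illustrative sketch, not part of the original header: the hypothetical
 * helper below shows how the version macros above round-trip, e.g.
 * RDS_PROTOCOL(3, 1) == RDS_PROTOCOL_3_1 == 0x0301.
 */
static inline int rds_protocol_example(void)
{
	u16 v = RDS_PROTOCOL(3, 1);	/* 0x0301 */

	/* MAJOR pulls the high byte back out, MINOR the low byte */
	return RDS_PROTOCOL_MAJOR(v) == 3 && RDS_PROTOCOL_MINOR(v) == 1;
}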

/*
 * XXX randomly chosen, but at least seems to be unused:
 * #               18464-18768 Unassigned
 * We should do better.  We want a reserved port to discourage unprivileged
 * userspace from listening.
 */
#define RDS_PORT	18634

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif

#ifdef RDS_DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__, ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
static inline __printf(1, 2)
void rdsdebug(char *fmt, ...)
{
}
#endif

/* XXX is there one of these somewhere? */
#define ceil(x, y) \
	({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })

#define RDS_FRAG_SHIFT	12
#define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))

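/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * use of the ceil() macro above, counting how many RDS_FRAG_SIZE
 * fragments a payload of @len bytes occupies.
 */
static inline unsigned long rds_frag_count_example(unsigned long len)
{
	return ceil(len, RDS_FRAG_SIZE);	/* e.g. 4097 bytes -> 2 frags */
}
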
#define RDS_CONG_MAP_BYTES	(65536 / 8)
#define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)

struct rds_cong_map {
	struct rb_node		m_rb_node;
	__be32			m_addr;
	wait_queue_head_t	m_waitq;
	struct list_head	m_conn_list;
	unsigned long		m_page_addrs[RDS_CONG_MAP_PAGES];
};
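
/*
 * Illustrative sketch, not part of the original header: the map carries
 * one bit per 16-bit port, so a hypothetical helper can decompose a port
 * into a page index into m_page_addrs[] and a bit offset within that page.
 */
static inline void rds_cong_example_locate(__be16 port, unsigned long *page,
					   unsigned long *bit)
{
	*page = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
	*bit = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
}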

/*
 * This is how we will track the connection state:
 * A connection is always in one of the following
 * states. Updates to the state are atomic and imply
 * a memory barrier.
 */
enum {
	RDS_CONN_DOWN = 0,
	RDS_CONN_CONNECTING,
	RDS_CONN_DISCONNECTING,
	RDS_CONN_UP,
	RDS_CONN_RESETTING,
	RDS_CONN_ERROR,
};

/* Bits for c_flags */
#define RDS_LL_SEND_FULL	0
#define RDS_RECONNECT_PENDING	1
#define RDS_IN_XMIT		2
#define RDS_RECV_REFILL		3

/* Max number of multipaths per RDS connection. Must be a power of 2 */
#define	RDS_MPATH_WORKERS	8
#define	RDS_MPATH_HASH(rs, n) (jhash_1word((rs)->rs_bound_port, \
			       (rs)->rs_hash_initval) & ((n) - 1))

/* Per mpath connection state */
struct rds_conn_path {
	struct rds_connection	*cp_conn;
	struct rds_message	*cp_xmit_rm;
	unsigned long		cp_xmit_sg;
	unsigned int		cp_xmit_hdr_off;
	unsigned int		cp_xmit_data_off;
	unsigned int		cp_xmit_atomic_sent;
	unsigned int		cp_xmit_rdma_sent;
	unsigned int		cp_xmit_data_sent;

	spinlock_t		cp_lock;		/* protect msg queues */
	u64			cp_next_tx_seq;
	struct list_head	cp_send_queue;
	struct list_head	cp_retrans;

	u64			cp_next_rx_seq;

	void			*cp_transport_data;

	atomic_t		cp_state;
	unsigned long		cp_send_gen;
	unsigned long		cp_flags;
	unsigned long		cp_reconnect_jiffies;
	struct delayed_work	cp_send_w;
	struct delayed_work	cp_recv_w;
	struct delayed_work	cp_conn_w;
	struct work_struct	cp_down_w;
	struct mutex		cp_cm_lock;	/* protect cp_state & cm */
	wait_queue_head_t	cp_waitq;

	unsigned int		cp_unacked_packets;
	unsigned int		cp_unacked_bytes;
	unsigned int		cp_outgoing:1,
				cp_pad_to_32:31;
	unsigned int		cp_index;
};

/* One rds_connection per RDS address pair */
struct rds_connection {
	struct hlist_node	c_hash_node;
	__be32			c_laddr;
	__be32			c_faddr;
	unsigned int		c_loopback:1,
				c_ping_triggered:1,
				c_pad_to_32:30;
	int			c_npaths;
	struct rds_connection	*c_passive;
	struct rds_transport	*c_trans;

	struct rds_cong_map	*c_lcong;
	struct rds_cong_map	*c_fcong;

	/* Protocol version */
	unsigned int		c_version;
	possible_net_t		c_net;

	struct list_head	c_map_item;
	unsigned long		c_map_queued;

	struct rds_conn_path	c_path[RDS_MPATH_WORKERS];
	wait_queue_head_t	c_hs_waitq; /* handshake waitq */

	u32			c_my_gen_num;
	u32			c_peer_gen_num;
};

static inline
struct net *rds_conn_net(struct rds_connection *conn)
{
	return read_pnet(&conn->c_net);
}

static inline
void rds_conn_net_set(struct rds_connection *conn, struct net *net)
{
	write_pnet(&conn->c_net, net);
}

#define RDS_FLAG_CONG_BITMAP	0x01
#define RDS_FLAG_ACK_REQUIRED	0x02
#define RDS_FLAG_RETRANSMITTED	0x04
#define RDS_MAX_ADV_CREDIT	255

/* RDS_FLAG_PROBE_PORT is the reserved sport used for sending a ping
 * probe to exchange control information before establishing a connection.
 * Currently the control information that is exchanged is the number of
 * supported paths. If the peer is a legacy peer (an older kernel revision),
 * it returns a pong message without the additional control information,
 * which tells the sender that the peer is running an older revision.
 */
#define RDS_FLAG_PROBE_PORT	1
#define	RDS_HS_PROBE(sport, dport) \
		(((sport) == RDS_FLAG_PROBE_PORT && (dport) == 0) || \
		 ((sport) == 0 && (dport) == RDS_FLAG_PROBE_PORT))
/*
 * Maximum space available for extension headers.
 */
#define RDS_HEADER_EXT_SPACE	16

struct rds_header {
	__be64	h_sequence;
	__be64	h_ack;
	__be32	h_len;
	__be16	h_sport;
	__be16	h_dport;
	u8	h_flags;
	u8	h_credit;
	u8	h_padding[4];
	__sum16	h_csum;

	u8	h_exthdr[RDS_HEADER_EXT_SPACE];
};
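
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * check combining RDS_HS_PROBE() with the wire header above to decide
 * whether an incoming message is a handshake probe.
 */
static inline bool rds_hs_probe_example(const struct rds_header *hdr)
{
	return RDS_HS_PROBE(be16_to_cpu(hdr->h_sport),
			    be16_to_cpu(hdr->h_dport));
}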

/*
 * Reserved - indicates end of extensions
 */
#define RDS_EXTHDR_NONE		0

/*
 * This extension header is included in the very
 * first message that is sent on a new connection,
 * and identifies the protocol level. This will help
 * rolling updates if a future change requires breaking
 * the protocol.
 * NB: This is no longer true for IB, where we do a version
 * negotiation during the connection setup phase (protocol
 * version information is included in the RDMA CM private data).
 */
#define RDS_EXTHDR_VERSION	1
struct rds_ext_header_version {
	__be32			h_version;
};

/*
 * This extension header is included in the RDS message
 * that follows an RDMA operation.
 */
#define RDS_EXTHDR_RDMA		2
struct rds_ext_header_rdma {
	__be32			h_rdma_rkey;
};

/*
 * This extension header tells the peer about the
 * destination <R_Key,offset> of the requested RDMA
 * operation.
 */
#define RDS_EXTHDR_RDMA_DEST	3
struct rds_ext_header_rdma_dest {
	__be32			h_rdma_rkey;
	__be32			h_rdma_offset;
};

/* Extension header announcing number of paths.
 * Implicit length = 2 bytes.
 */
#define RDS_EXTHDR_NPATHS	5
#define RDS_EXTHDR_GEN_NUM	6

#define __RDS_EXTHDR_MAX	16 /* for now */

struct rds_incoming {
	atomic_t		i_refcount;
	struct list_head	i_item;
	struct rds_connection	*i_conn;
	struct rds_conn_path	*i_conn_path;
	struct rds_header	i_hdr;
	unsigned long		i_rx_jiffies;
	__be32			i_saddr;

	rds_rdma_cookie_t	i_rdma_cookie;
	struct timeval		i_rx_tstamp;
};

struct rds_mr {
	struct rb_node		r_rb_node;
	atomic_t		r_refcount;
	u32			r_key;

	/* A copy of the creation flags */
	unsigned int		r_use_once:1;
	unsigned int		r_invalidate:1;
	unsigned int		r_write:1;

	/* This is for RDS_MR_DEAD.
	 * It would be nice & consistent to make this part of the above
	 * bit field here, but we need to use test_and_set_bit.
	 */
	unsigned long		r_state;
	struct rds_sock		*r_sock; /* back pointer to the socket that owns us */
	struct rds_transport	*r_trans;
	void			*r_trans_private;
};

/* Flags for mr->r_state */
#define RDS_MR_DEAD		0

static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
{
	return r_key | (((u64) offset) << 32);
}

static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
{
	return cookie;
}

static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
{
	return cookie >> 32;
}
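
/*
 * Illustrative sketch, not part of the original header: the helpers above
 * pack an <r_key, offset> pair into one 64-bit cookie and unpack it again.
 */
static inline int rds_rdma_cookie_example(void)
{
	rds_rdma_cookie_t cookie = rds_rdma_make_cookie(0x1234, 4096);

	return rds_rdma_cookie_key(cookie) == 0x1234 &&	/* low 32 bits */
	       rds_rdma_cookie_offset(cookie) == 4096;	/* high 32 bits */
}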

/* atomic operation types */
#define RDS_ATOMIC_TYPE_CSWP		0
#define RDS_ATOMIC_TYPE_FADD		1

/*
 * m_sock_item and m_conn_item are on lists that are serialized under
 * conn->c_lock.  m_sock_item has additional meaning in that once it is empty
 * the message will not be put back on the retransmit list after being sent.
 * Messages that are canceled while being sent rely on this.
 *
 * m_inc is used by loopback so that it can pass an incoming message straight
 * back up into the rx path.  It embeds a wire header which is also used by
 * the send path, which is kind of awkward.
 *
 * m_sock_item indicates the message's presence on a socket's send or receive
 * queue.  m_rs will point to that socket.
 *
 * m_daddr is used by cancellation to prune messages to a given destination.
 *
 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
 * nesting.  As paths iterate over messages on a sock, or conn, they must
 * also lock the conn, or sock, to remove the message from those lists too.
 * Testing the flag to determine if the message is still on the lists lets
 * us avoid testing the list_head directly.  That means each path can use
 * the message's list_head to keep it on a local list while juggling locks
 * without confusing the other path.
 *
 * m_ack_seq is an optional field set by transports that need a different
 * sequence number range to invalidate.  They can use this in a callback
 * that they pass to rds_send_drop_acked() to see if each message has been
 * acked.  The HAS_ACK_SEQ flag can be used to detect messages which haven't
 * had ack_seq set yet.
 */
#define RDS_MSG_ON_SOCK		1
#define RDS_MSG_ON_CONN		2
#define RDS_MSG_HAS_ACK_SEQ	3
#define RDS_MSG_ACK_REQUIRED	4
#define RDS_MSG_RETRANSMITTED	5
#define RDS_MSG_MAPPED		6
#define RDS_MSG_PAGEVEC		7
#define RDS_MSG_FLUSH		8

struct rds_message {
	atomic_t		m_refcount;
	struct list_head	m_sock_item;
	struct list_head	m_conn_item;
	struct rds_incoming	m_inc;
	u64			m_ack_seq;
	__be32			m_daddr;
	unsigned long		m_flags;

	/* Never access m_rs without holding m_rs_lock.
	 * Lock nesting is
	 *  rm->m_rs_lock
	 *   -> rs->rs_lock
	 */
	spinlock_t		m_rs_lock;
	wait_queue_head_t	m_flush_wait;

	struct rds_sock		*m_rs;

	/* cookie to send to remote, in rds header */
	rds_rdma_cookie_t	m_rdma_cookie;

	unsigned int		m_used_sgs;
	unsigned int		m_total_sgs;

	void			*m_final_op;

	struct {
		struct rm_atomic_op {
			int			op_type;
			union {
				struct {
					uint64_t	compare;
					uint64_t	swap;
					uint64_t	compare_mask;
					uint64_t	swap_mask;
				} op_m_cswp;
				struct {
					uint64_t	add;
					uint64_t	nocarry_mask;
				} op_m_fadd;
			};

			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} atomic;
		struct rm_rdma_op {
			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_write:1;
			unsigned int		op_fence:1;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			unsigned int		op_bytes;
			unsigned int		op_nents;
			unsigned int		op_count;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} rdma;
		struct rm_data_op {
			unsigned int		op_active:1;
			unsigned int		op_nents;
			unsigned int		op_count;
			unsigned int		op_dmasg;
			unsigned int		op_dmaoff;
			struct scatterlist	*op_sg;
		} data;
	};
};
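
/*
 * Illustrative sketch, not part of the original header: per the comment
 * above the flag definitions, paths test RDS_MSG_ON_SOCK/RDS_MSG_ON_CONN
 * in m_flags instead of inspecting the list_heads directly.
 */
static inline int rds_message_example_on_sock(struct rds_message *rm)
{
	return test_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
}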

/*
 * The RDS notifier is used (optionally) to tell the application about
 * completed RDMA operations. Rather than keeping the whole rds message
 * around on the queue, we allocate a small notifier that is put on the
 * socket's notifier_list. Notifications are delivered to the application
 * through control messages.
 */
struct rds_notifier {
	struct list_head	n_list;
	uint64_t		n_user_token;
	int			n_status;
};

/**
 * struct rds_transport - transport-specific behavioural hooks
 *
 * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
 *        part of a message.  The caller serializes on the send_sem so this
 *        doesn't need to be reentrant for a given conn.  The header must be
 *        sent before the data payload.  .xmit must be prepared to send a
 *        message with no data payload.  .xmit should return the number of
 *        bytes that were sent down the connection, including header bytes.
 *        Returning 0 tells the caller that it doesn't need to perform any
 *        additional work now.  This is usually the case when the transport has
 *        filled the sending queue for its connection and will handle
 *        triggering the rds thread to continue the send when space becomes
 *        available.  Returning -EAGAIN tells the caller to retry the send
 *        immediately.  Returning -ENOMEM tells the caller to retry the send at
 *        some point in the future.
 *
 * @conn_shutdown: conn_shutdown stops traffic on the given connection.  Once
 *                 it returns the connection can not call rds_recv_incoming().
 *                 This will only be called once after conn_connect returns
 *                 non-zero success.  The caller serializes this with
 *                 the send and connecting paths (xmit_* and conn_*).  The
 *                 transport is responsible for other serialization, including
 *                 rds_recv_incoming().  This is called in process context but
 *                 should try hard not to block.
 */

struct rds_transport {
	char			t_name[TRANSNAMSIZ];
	struct list_head	t_item;
	struct module		*t_owner;
	unsigned int		t_prefer_loopback:1,
				t_mp_capable:1;
	unsigned int		t_type;

	int (*laddr_check)(struct net *net, __be32 addr);
	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
	void (*conn_free)(void *data);
	int (*conn_path_connect)(struct rds_conn_path *cp);
	void (*conn_path_shutdown)(struct rds_conn_path *conn);
	void (*xmit_path_prepare)(struct rds_conn_path *cp);
	void (*xmit_path_complete)(struct rds_conn_path *cp);
	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
		    unsigned int hdr_off, unsigned int sg, unsigned int off);
	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
	int (*recv_path)(struct rds_conn_path *cp);
	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
	void (*inc_free)(struct rds_incoming *inc);

	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event);
	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id);
	void (*cm_connect_complete)(struct rds_connection *conn,
				    struct rdma_cm_event *event);

	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
					unsigned int avail);
	void (*exit)(void);
	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
			struct rds_sock *rs, u32 *key_ret);
	void (*sync_mr)(void *trans_private, int direction);
	void (*free_mr)(void *trans_private, int invalidate);
	void (*flush_mrs)(void);
};
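
/*
 * Illustrative sketch, not part of the original header: one way a caller
 * could classify the .xmit return values documented in the kerneldoc
 * above.  The enum and helper are hypothetical.
 */
enum rds_xmit_example_action {
	RDS_XMIT_EXAMPLE_DONE,		/* 0: transport re-triggers the send */
	RDS_XMIT_EXAMPLE_RETRY_NOW,	/* -EAGAIN: retry immediately */
	RDS_XMIT_EXAMPLE_RETRY_LATER,	/* -ENOMEM: retry later */
	RDS_XMIT_EXAMPLE_PROGRESS,	/* > 0: bytes sent, header included */
};

static inline enum rds_xmit_example_action rds_xmit_example_classify(int ret)
{
	if (ret > 0)
		return RDS_XMIT_EXAMPLE_PROGRESS;
	if (ret == -EAGAIN)
		return RDS_XMIT_EXAMPLE_RETRY_NOW;
	if (ret == -ENOMEM)
		return RDS_XMIT_EXAMPLE_RETRY_LATER;
	return RDS_XMIT_EXAMPLE_DONE;
}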

struct rds_sock {
	struct sock		rs_sk;

	u64			rs_user_addr;
	u64			rs_user_bytes;

	/*
	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
	 * support.
	 */
	struct rhash_head	rs_bound_node;
	u64			rs_bound_key;
	__be32			rs_bound_addr;
	__be32			rs_conn_addr;
	__be16			rs_bound_port;
	__be16			rs_conn_port;
	struct rds_transport	*rs_transport;

	/*
	 * rds_sendmsg caches the conn it used the last time around.
	 * This helps avoid costly lookups.
	 */
	struct rds_connection	*rs_conn;

	/* flag indicating we were congested or not */
	int			rs_congested;
	/* seen congestion (ENOBUFS) when sending? */
	int			rs_seen_congestion;

	/* rs_lock protects all these adjacent members before the newline */
	spinlock_t		rs_lock;
	struct list_head	rs_send_queue;
	u32			rs_snd_bytes;
	int			rs_rcv_bytes;
	struct list_head	rs_notify_queue;	/* currently used for failed RDMAs */

	/* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
	 * to decide whether the application should be woken up.
	 * If not set, we use rs_cong_track to find out whether a cong map
	 * update arrived.
	 */
	uint64_t		rs_cong_mask;
	uint64_t		rs_cong_notify;
	struct list_head	rs_cong_list;
	unsigned long		rs_cong_track;

	/*
	 * rs_recv_lock protects the receive queue, and is
	 * used to serialize with rds_release.
	 */
	rwlock_t		rs_recv_lock;
	struct list_head	rs_recv_queue;

	/* just for stats reporting */
	struct list_head	rs_item;

	/* these have their own lock */
	spinlock_t		rs_rdma_lock;
	struct rb_root		rs_rdma_keys;

	/* Socket options - in case there will be more */
	unsigned char		rs_recverr,
				rs_cong_monitor;
	u32			rs_hash_initval;
};
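
/*
 * Illustrative sketch, not part of the original header: how a bound socket
 * could be hashed onto one of a connection's paths with RDS_MPATH_HASH()
 * from above.  The helper is hypothetical.
 */
static inline struct rds_conn_path *
rds_mpath_example_path(struct rds_sock *rs, struct rds_connection *conn)
{
	int n = conn->c_npaths ? conn->c_npaths : 1;	/* power of 2 */

	return &conn->c_path[RDS_MPATH_HASH(rs, n)];
}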

static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
{
	return container_of(sk, struct rds_sock, rs_sk);
}

static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
{
	return &rs->rs_sk;
}

/*
 * The stack assigns sk_sndbuf and sk_rcvbuf to twice the specified value
 * to account for overhead.  We don't account for overhead, we just apply
 * the number of payload bytes to the specified value.
 */
static inline int rds_sk_sndbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_sndbuf / 2;
}

static inline int rds_sk_rcvbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
}

struct rds_statistics {
	uint64_t	s_conn_reset;
	uint64_t	s_recv_drop_bad_checksum;
	uint64_t	s_recv_drop_old_seq;
	uint64_t	s_recv_drop_no_sock;
	uint64_t	s_recv_drop_dead_sock;
	uint64_t	s_recv_deliver_raced;
	uint64_t	s_recv_delivered;
	uint64_t	s_recv_queued;
	uint64_t	s_recv_immediate_retry;
	uint64_t	s_recv_delayed_retry;
	uint64_t	s_recv_ack_required;
	uint64_t	s_recv_rdma_bytes;
	uint64_t	s_recv_ping;
	uint64_t	s_send_queue_empty;
	uint64_t	s_send_queue_full;
	uint64_t	s_send_lock_contention;
	uint64_t	s_send_lock_queue_raced;
	uint64_t	s_send_immediate_retry;
	uint64_t	s_send_delayed_retry;
	uint64_t	s_send_drop_acked;
	uint64_t	s_send_ack_required;
	uint64_t	s_send_queued;
	uint64_t	s_send_rdma;
	uint64_t	s_send_rdma_bytes;
	uint64_t	s_send_pong;
	uint64_t	s_page_remainder_hit;
	uint64_t	s_page_remainder_miss;
	uint64_t	s_copy_to_user;
	uint64_t	s_copy_from_user;
	uint64_t	s_cong_update_queued;
	uint64_t	s_cong_update_received;
	uint64_t	s_cong_send_error;
	uint64_t	s_cong_send_blocked;
};

/* af_rds.c */
void rds_sock_addref(struct rds_sock *rs);
void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
	wait_queue_head_t *waitq = sk_sleep(sk);

	if (!sock_flag(sk, SOCK_DEAD) && waitq)
		wake_up(waitq);
}
extern wait_queue_head_t rds_poll_waitq;

/* bind.c */
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
void rds_remove_bound(struct rds_sock *rs);
struct rds_sock *rds_find_bound(__be32 addr, __be16 port);
int rds_bind_lock_init(void);
void rds_bind_lock_destroy(void);

/* cong.c */
int rds_cong_get_maps(struct rds_connection *conn);
void rds_cong_add_conn(struct rds_connection *conn);
void rds_cong_remove_conn(struct rds_connection *conn);
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs);
void rds_cong_queue_updates(struct rds_cong_map *map);
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
int rds_cong_updated_since(unsigned long *recent);
void rds_cong_add_socket(struct rds_sock *);
void rds_cong_remove_socket(struct rds_sock *);
void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);

/* conn.c */
extern u32 rds_gen_num;
int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(struct net *net,
				       __be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
						__be32 laddr, __be32 faddr,
						struct rds_transport *trans,
						gfp_t gfp);
void rds_conn_shutdown(struct rds_conn_path *cpath);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
void rds_conn_path_drop(struct rds_conn_path *cpath);
void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    size_t item_len);

__printf(2, 3)
void __rds_conn_path_error(struct rds_conn_path *cp, const char *, ...);
#define rds_conn_path_error(cp, fmt...) \
	__rds_conn_path_error(cp, KERN_WARNING "RDS: " fmt)

static inline int
rds_conn_path_transition(struct rds_conn_path *cp, int old, int new)
{
	return atomic_cmpxchg(&cp->cp_state, old, new) == old;
}
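
/*
 * Illustrative sketch, not part of the original header: a typical guarded
 * state change, e.g. claiming the connect work for a path that is
 * currently down.  The helper is hypothetical.
 */
static inline int rds_conn_example_start_connect(struct rds_conn_path *cp)
{
	return rds_conn_path_transition(cp, RDS_CONN_DOWN,
					RDS_CONN_CONNECTING);
}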

static inline int
rds_conn_transition(struct rds_connection *conn, int old, int new)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_transition(&conn->c_path[0], old, new);
}

static inline int
rds_conn_path_state(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state);
}

static inline int
rds_conn_state(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_state(&conn->c_path[0]);
}

static inline int
rds_conn_path_up(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_UP;
}

static inline int
rds_conn_up(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_up(&conn->c_path[0]);
}

static inline int
rds_conn_path_connecting(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING;
}

static inline int
rds_conn_connecting(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_connecting(&conn->c_path[0]);
}

/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq);
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data, unsigned int len);
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_message_wait(struct rds_message *rm);
void rds_message_unmapped(struct rds_message *rm);

static inline void rds_message_make_checksum(struct rds_header *hdr)
{
	hdr->h_csum = 0;
	hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
}

static inline int rds_message_verify_checksum(const struct rds_header *hdr)
{
	return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
}

/* page.c */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp);
int rds_page_copy_user(struct page *page, unsigned long offset,
		       void __user *ptr, unsigned long bytes,
		       int to_user);
#define rds_page_copy_to_user(page, offset, ptr, bytes) \
	rds_page_copy_user(page, offset, ptr, bytes, 1)
#define rds_page_copy_from_user(page, offset, ptr, bytes) \
	rds_page_copy_user(page, offset, ptr, bytes, 0)
void rds_page_exit(void);

/* recv.c */
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  __be32 saddr);
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *conn,
		       __be32 saddr);
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
		       struct rds_incoming *inc, gfp_t gfp);
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip);

/* send.c */
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
void rds_send_path_reset(struct rds_conn_path *conn);
int rds_send_xmit(struct rds_conn_path *cp);
struct sockaddr_in;
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked);
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked);
int rds_send_pong(struct rds_conn_path *cp, __be16 dport);

/* rdma.c */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
int rds_rdma_extra_size(struct rds_rdma_args *args);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
void rds_atomic_free_op(struct rm_atomic_op *ao);
void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg);

void __rds_put_mr_final(struct rds_mr *mr);
static inline void rds_mr_put(struct rds_mr *mr)
{
	if (atomic_dec_and_test(&mr->r_refcount))
		__rds_put_mr_final(mr);
}

/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do {		\
	per_cpu(which, get_cpu()).member++;		\
	put_cpu();					\
} while (0)
#define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
#define rds_stats_add_which(which, member, count) do {	\
	per_cpu(which, get_cpu()).member += (count);	\
	put_cpu();					\
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
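
/*
 * Illustrative usage sketch, not part of the original header: bumping a
 * counter and adding a byte count through the wrappers above.
 */
static inline void rds_stats_example(unsigned int bytes)
{
	rds_stats_inc(s_send_queued);
	rds_stats_add(s_send_rdma_bytes, bytes);
}
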
int rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
			 uint64_t *values, const char *const *names,
			 size_t nr);

/* sysctl.c */
int rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
extern unsigned long rds_sysctl_sndbuf_max;
extern unsigned long rds_sysctl_reconnect_min_jiffies;
extern unsigned long rds_sysctl_reconnect_max_jiffies;
extern unsigned int  rds_sysctl_max_unacked_packets;
extern unsigned int  rds_sysctl_max_unacked_bytes;
extern unsigned int  rds_sysctl_ping_enable;
extern unsigned long rds_sysctl_trace_flags;
extern unsigned int  rds_sysctl_trace_level;

/* threads.c */
int rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
void rds_queue_reconnect(struct rds_conn_path *cp);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_path_complete(struct rds_conn_path *conn, int curr);
void rds_connect_complete(struct rds_connection *conn);

/* transport.c */
int rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
				       unsigned int avail);
struct rds_transport *rds_trans_get(int t_type);
int rds_trans_init(void);
void rds_trans_exit(void);

#endif /* _RDS_RDS_H */