/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RDS_RDS_H
#define _RDS_RDS_H

#include <net/sock.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <rdma/rdma_cm.h>
#include <linux/mutex.h>
#include <linux/rds.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/in6.h>

#include "info.h"

/*
 * RDS Network protocol version
 */
#define RDS_PROTOCOL_3_0	0x0300
#define RDS_PROTOCOL_3_1	0x0301
#define RDS_PROTOCOL_VERSION	RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
#define RDS_PROTOCOL_MINOR(v)	((v) & 255)
#define RDS_PROTOCOL(maj, min)	(((maj) << 8) | min)
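
/*
 * For illustration (using only the macros above): RDS_PROTOCOL(3, 1)
 * evaluates to 0x0301, RDS_PROTOCOL_MAJOR(0x0301) to 3 and
 * RDS_PROTOCOL_MINOR(0x0301) to 1, so a negotiated c_version can be split
 * back into its components, e.g.
 *
 *	pr_info("RDS %u.%u\n", RDS_PROTOCOL_MAJOR(conn->c_version),
 *		RDS_PROTOCOL_MINOR(conn->c_version));
 */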

/* The following ports, 16385, 18634, 18635, are registered with IANA as
 * the ports to be used for RDS over TCP and UDP.  Currently, only RDS over
 * TCP and RDS over IB/RDMA are implemented.  18634 is the historical value
 * used for the RDMA_CM listener port.  RDS/TCP uses port 16385.  After
 * IPv6 work, RDMA_CM also uses 16385 as the listener port.  18634 is kept
 * to ensure compatibility with older RDS modules.  Those ports are defined
 * in each transport's header file.
 */
#define RDS_PORT	18634

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif

#ifdef RDS_DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
static inline __printf(1, 2)
void rdsdebug(char *fmt, ...)
{
}
#endif

/* XXX is there one of these somewhere? */
#define ceil(x, y) \
	({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })

#define RDS_FRAG_SHIFT	12
#define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))

/* Used to limit both RDMA and non-RDMA RDS message to 1MB */
#define RDS_MAX_MSG_SIZE	((unsigned int)(1 << 20))
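
/*
 * Example (worked numbers only): with RDS_FRAG_SHIFT = 12 a fragment is
 * 4096 bytes, so a maximum-sized message needs
 * ceil(RDS_MAX_MSG_SIZE, RDS_FRAG_SIZE) = (1048576 + 4095) / 4096 = 256
 * fragments.
 */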

#define RDS_CONG_MAP_BYTES	(65536 / 8)
#define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)

struct rds_cong_map {
	struct rb_node		m_rb_node;
	struct in6_addr		m_addr;
	wait_queue_head_t	m_waitq;
	struct list_head	m_conn_list;
	unsigned long		m_page_addrs[RDS_CONG_MAP_PAGES];
};
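
/*
 * The map is one bit per 16-bit port (65536 bits = 8 KB), split across
 * whole pages.  A sketch of how a port is located in the bitmap (modelled
 * on the bit helpers in cong.c):
 *
 *	i   = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
 *	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 *	set_bit_le(off, (void *)map->m_page_addrs[i]);
 */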


/*
 * This is how we will track the connection state:
 * A connection is always in one of the following
 * states. Updates to the state are atomic and imply
 * a memory barrier.
 */
enum {
	RDS_CONN_DOWN = 0,
	RDS_CONN_CONNECTING,
	RDS_CONN_DISCONNECTING,
	RDS_CONN_UP,
	RDS_CONN_RESETTING,
	RDS_CONN_ERROR,
};
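
/*
 * State changes go through an atomic compare-and-swap so racing paths see
 * a single winner.  A sketch of the connect worker's use of this (see
 * rds_conn_path_transition() further down):
 *
 *	if (rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING))
 *		cp->cp_conn->c_trans->conn_path_connect(cp);
 */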

/* Bits for c_flags */
#define RDS_LL_SEND_FULL	0
#define RDS_RECONNECT_PENDING	1
#define RDS_IN_XMIT		2
#define RDS_RECV_REFILL		3
#define	RDS_DESTROY_PENDING	4

/* Max number of multipaths per RDS connection. Must be a power of 2 */
#define	RDS_MPATH_WORKERS	8
#define	RDS_MPATH_HASH(rs, n) (jhash_1word((rs)->rs_bound_port, \
			       (rs)->rs_hash_initval) & ((n) - 1))

#define IS_CANONICAL(laddr, faddr) (htonl(laddr) < htonl(faddr))
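
/*
 * A sketch of how the send path picks a path with these macros (based on
 * the multipath handling in rds_sendmsg()):
 *
 *	int idx = 0;
 *
 *	if (conn->c_trans->t_mp_capable)
 *		idx = RDS_MPATH_HASH(rs, conn->c_npaths);
 *	cpath = &conn->c_path[idx];
 */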

/* Per mpath connection state */
struct rds_conn_path {
	struct rds_connection	*cp_conn;
	struct rds_message	*cp_xmit_rm;
	unsigned long		cp_xmit_sg;
	unsigned int		cp_xmit_hdr_off;
	unsigned int		cp_xmit_data_off;
	unsigned int		cp_xmit_atomic_sent;
	unsigned int		cp_xmit_rdma_sent;
	unsigned int		cp_xmit_data_sent;

	spinlock_t		cp_lock;		/* protect msg queues */
	u64			cp_next_tx_seq;
	struct list_head	cp_send_queue;
	struct list_head	cp_retrans;

	u64			cp_next_rx_seq;

	void			*cp_transport_data;

	atomic_t		cp_state;
	unsigned long		cp_send_gen;
	unsigned long		cp_flags;
	unsigned long		cp_reconnect_jiffies;
	struct delayed_work	cp_send_w;
	struct delayed_work	cp_recv_w;
	struct delayed_work	cp_conn_w;
	struct work_struct	cp_down_w;
	struct mutex		cp_cm_lock;	/* protect cp_state & cm */
	wait_queue_head_t	cp_waitq;

	unsigned int		cp_unacked_packets;
	unsigned int		cp_unacked_bytes;
	unsigned int		cp_index;
};

/* One rds_connection per RDS address pair */
struct rds_connection {
	struct hlist_node	c_hash_node;
	struct in6_addr		c_laddr;
	struct in6_addr		c_faddr;
	int			c_dev_if; /* ifindex used for this conn */
	int			c_bound_if; /* ifindex of c_laddr */
	unsigned int		c_loopback:1,
				c_isv6:1,
				c_ping_triggered:1,
				c_pad_to_32:29;
	int			c_npaths;
	struct rds_connection	*c_passive;
	struct rds_transport	*c_trans;

	struct rds_cong_map	*c_lcong;
	struct rds_cong_map	*c_fcong;

	/* Protocol version */
	unsigned int		c_version;
	possible_net_t		c_net;

	struct list_head	c_map_item;
	unsigned long		c_map_queued;

	struct rds_conn_path	*c_path;
	wait_queue_head_t	c_hs_waitq; /* handshake waitq */

	u32			c_my_gen_num;
	u32			c_peer_gen_num;
};

static inline
struct net *rds_conn_net(struct rds_connection *conn)
{
	return read_pnet(&conn->c_net);
}

static inline
void rds_conn_net_set(struct rds_connection *conn, struct net *net)
{
	write_pnet(&conn->c_net, net);
}

#define RDS_FLAG_CONG_BITMAP	0x01
#define RDS_FLAG_ACK_REQUIRED	0x02
#define RDS_FLAG_RETRANSMITTED	0x04
#define RDS_MAX_ADV_CREDIT	255

/* RDS_FLAG_PROBE_PORT is the reserved sport used for sending a ping
 * probe to exchange control information before establishing a connection.
 * Currently the only control information exchanged is the number of
 * supported paths.  A legacy peer (an older kernel revision) returns a
 * pong message without the additional control information, which tells
 * the sender that the peer is running an older revision.
 */
#define RDS_FLAG_PROBE_PORT	1
#define	RDS_HS_PROBE(sport, dport) \
		((sport == RDS_FLAG_PROBE_PORT && dport == 0) || \
		 (sport == 0 && dport == RDS_FLAG_PROBE_PORT))
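
/*
 * For illustration, a handshake probe can be recognised from the wire
 * header's (host-order) ports alone, e.g.:
 *
 *	bool is_probe = RDS_HS_PROBE(be16_to_cpu(hdr->h_sport),
 *				     be16_to_cpu(hdr->h_dport));
 */
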
/*
 * Maximum space available for extension headers.
 */
#define RDS_HEADER_EXT_SPACE	16

struct rds_header {
	__be64	h_sequence;
	__be64	h_ack;
	__be32	h_len;
	__be16	h_sport;
	__be16	h_dport;
	u8	h_flags;
	u8	h_credit;
	u8	h_padding[4];
	__sum16	h_csum;

	u8	h_exthdr[RDS_HEADER_EXT_SPACE];
};

/*
 * Reserved - indicates end of extensions
 */
#define RDS_EXTHDR_NONE		0

/*
 * This extension header is included in the very
 * first message that is sent on a new connection,
 * and identifies the protocol level. This will help
 * rolling updates if a future change requires breaking
 * the protocol.
 * NB: This is no longer true for IB, where we do a version
 * negotiation during the connection setup phase (protocol
 * version information is included in the RDMA CM private data).
 */
#define RDS_EXTHDR_VERSION	1
struct rds_ext_header_version {
	__be32			h_version;
};

/*
 * This extension header is included in the RDS message
 * chasing an RDMA operation.
 */
#define RDS_EXTHDR_RDMA		2
struct rds_ext_header_rdma {
	__be32			h_rdma_rkey;
};

/*
 * This extension header tells the peer about the
 * destination <R_Key,offset> of the requested RDMA
 * operation.
 */
#define RDS_EXTHDR_RDMA_DEST	3
struct rds_ext_header_rdma_dest {
	__be32			h_rdma_rkey;
	__be32			h_rdma_offset;
};

/* Extension header announcing number of paths.
 * Implicit length = 2 bytes.
 */
#define RDS_EXTHDR_NPATHS	5
#define RDS_EXTHDR_GEN_NUM	6
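
/*
 * A sketch (modelled on the probe path in send.c) of how a sender could
 * advertise these in a handshake probe using rds_message_add_extension(),
 * declared later in this header:
 *
 *	u16 npaths = cpu_to_be16(RDS_MPATH_WORKERS);
 *
 *	rds_message_add_extension(&rm->m_inc.i_hdr, RDS_EXTHDR_NPATHS,
 *				  &npaths, sizeof(npaths));
 *	rds_message_add_extension(&rm->m_inc.i_hdr, RDS_EXTHDR_GEN_NUM,
 *				  &my_gen_num, sizeof(u32));
 */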

#define __RDS_EXTHDR_MAX	16 /* for now */
#define RDS_RX_MAX_TRACES	(RDS_MSG_RX_DGRAM_TRACE_MAX + 1)
#define	RDS_MSG_RX_HDR		0
#define	RDS_MSG_RX_START	1
#define	RDS_MSG_RX_END		2
#define	RDS_MSG_RX_CMSG		3

struct rds_incoming {
	refcount_t		i_refcount;
	struct list_head	i_item;
	struct rds_connection	*i_conn;
	struct rds_conn_path	*i_conn_path;
	struct rds_header	i_hdr;
	unsigned long		i_rx_jiffies;
	struct in6_addr		i_saddr;

	rds_rdma_cookie_t	i_rdma_cookie;
	ktime_t			i_rx_tstamp;
	u64			i_rx_lat_trace[RDS_RX_MAX_TRACES];
};

struct rds_mr {
	struct rb_node		r_rb_node;
	refcount_t		r_refcount;
	u32			r_key;

	/* A copy of the creation flags */
	unsigned int		r_use_once:1;
	unsigned int		r_invalidate:1;
	unsigned int		r_write:1;

	/* This is for RDS_MR_DEAD.
	 * It would be nice & consistent to make this part of the above
	 * bit field here, but we need to use test_and_set_bit.
	 */
	unsigned long		r_state;
	struct rds_sock		*r_sock; /* back pointer to the socket that owns us */
	struct rds_transport	*r_trans;
	void			*r_trans_private;
};

/* Flags for mr->r_state */
#define RDS_MR_DEAD		0

static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
{
	return r_key | (((u64) offset) << 32);
}

static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
{
	return cookie;
}

static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
{
	return cookie >> 32;
}
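
/*
 * For example, rds_rdma_make_cookie(0x1234, 64) yields the 64-bit value
 * 0x0000004000001234; rds_rdma_cookie_key() and rds_rdma_cookie_offset()
 * recover 0x1234 and 64 respectively on the other side.
 */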

/* atomic operation types */
#define RDS_ATOMIC_TYPE_CSWP		0
#define RDS_ATOMIC_TYPE_FADD		1

/*
 * m_sock_item and m_conn_item are on lists that are serialized under
 * conn->c_lock.  m_sock_item has additional meaning in that once it is empty
 * the message will not be put back on the retransmit list after being sent.
 * Messages that are canceled while being sent rely on this.
 *
 * m_inc is used by loopback so that it can pass an incoming message straight
 * back up into the rx path.  It embeds a wire header which is also used by
 * the send path, which is kind of awkward.
 *
 * m_sock_item indicates the message's presence on a socket's send or receive
 * queue.  m_rs will point to that socket.
 *
 * m_daddr is used by cancellation to prune messages to a given destination.
 *
 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
 * nesting.  As paths iterate over messages on a sock, or conn, they must
 * also lock the conn, or sock, to remove the message from those lists too.
 * Testing the flag to determine if the message is still on the lists lets
 * us avoid testing the list_head directly.  That means each path can use
 * the message's list_head to keep it on a local list while juggling locks
 * without confusing the other path.
 *
 * m_ack_seq is an optional field set by transports that need a different
 * sequence number range to invalidate.  They can use this in a callback
 * that they pass to rds_send_drop_acked() to see if each message has been
 * acked.  The HAS_ACK_SEQ flag can be used to detect messages which haven't
 * had ack_seq set yet.
 */
#define RDS_MSG_ON_SOCK		1
#define RDS_MSG_ON_CONN		2
#define RDS_MSG_HAS_ACK_SEQ	3
#define RDS_MSG_ACK_REQUIRED	4
#define RDS_MSG_RETRANSMITTED	5
#define RDS_MSG_MAPPED		6
#define RDS_MSG_PAGEVEC		7
#define RDS_MSG_FLUSH		8
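
/*
 * A hypothetical is_acked callback built on m_ack_seq, as described above
 * (a sketch only; real transports may compare against a header sequence
 * instead):
 *
 *	static int foo_is_acked(struct rds_message *rm, uint64_t ack)
 *	{
 *		if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
 *			return 0;
 *		return rm->m_ack_seq <= ack;
 *	}
 *
 * which would then be passed to rds_send_drop_acked().
 */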

struct rds_znotifier {
	struct mmpin		z_mmp;
	u32			z_cookie;
};

struct rds_msg_zcopy_info {
	struct list_head rs_zcookie_next;
	union {
		struct rds_znotifier znotif;
		struct rds_zcopy_cookies zcookies;
	};
};

struct rds_msg_zcopy_queue {
	struct list_head zcookie_head;
	spinlock_t lock; /* protects zcookie_head queue */
};

static inline void rds_message_zcopy_queue_init(struct rds_msg_zcopy_queue *q)
{
	spin_lock_init(&q->lock);
	INIT_LIST_HEAD(&q->zcookie_head);
}

struct rds_iov_vector {
	struct rds_iovec *iov;
	int               len;
};

struct rds_iov_vector_arr {
	struct rds_iov_vector *vec;
	int                    len;
	int                    indx;
	int                    incr;
};

struct rds_message {
	refcount_t		m_refcount;
	struct list_head	m_sock_item;
	struct list_head	m_conn_item;
	struct rds_incoming	m_inc;
	u64			m_ack_seq;
	struct in6_addr		m_daddr;
	unsigned long		m_flags;

	/* Never access m_rs without holding m_rs_lock.
	 * Lock nesting is
	 *  rm->m_rs_lock
	 *   -> rs->rs_lock
	 */
	spinlock_t		m_rs_lock;
	wait_queue_head_t	m_flush_wait;

	struct rds_sock		*m_rs;

	/* cookie to send to remote, in rds header */
	rds_rdma_cookie_t	m_rdma_cookie;

	unsigned int		m_used_sgs;
	unsigned int		m_total_sgs;

	void			*m_final_op;

	struct {
		struct rm_atomic_op {
			int			op_type;
			union {
				struct {
					uint64_t	compare;
					uint64_t	swap;
					uint64_t	compare_mask;
					uint64_t	swap_mask;
				} op_m_cswp;
				struct {
					uint64_t	add;
					uint64_t	nocarry_mask;
				} op_m_fadd;
			};

			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} atomic;
		struct rm_rdma_op {
			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_write:1;
			unsigned int		op_fence:1;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			unsigned int		op_bytes;
			unsigned int		op_nents;
			unsigned int		op_count;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} rdma;
		struct rm_data_op {
			unsigned int		op_active:1;
			unsigned int		op_notify:1;
			unsigned int		op_nents;
			unsigned int		op_count;
			unsigned int		op_dmasg;
			unsigned int		op_dmaoff;
			struct rds_znotifier	*op_mmp_znotifier;
			struct scatterlist	*op_sg;
		} data;
	};

	struct rds_conn_path *m_conn_path;
};

/*
 * The RDS notifier is used (optionally) to tell the application about
 * completed RDMA operations. Rather than keeping the whole rds message
 * around on the queue, we allocate a small notifier that is put on the
 * socket's notifier_list. Notifications are delivered to the application
 * through control messages.
 */
struct rds_notifier {
	struct list_head	n_list;
	uint64_t		n_user_token;
	int			n_status;
};

/* Available as part of RDS core, so doesn't need to participate
 * in get_preferred transport etc
 */
#define	RDS_TRANS_LOOP	3

/**
 * struct rds_transport - transport-specific behavioural hooks
 *
 * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
 *        part of a message.  The caller serializes on the send_sem so this
 *        doesn't need to be reentrant for a given conn.  The header must be
 *        sent before the data payload.  .xmit must be prepared to send a
 *        message with no data payload.  .xmit should return the number of
 *        bytes that were sent down the connection, including header bytes.
 *        Returning 0 tells the caller that it doesn't need to perform any
 *        additional work now.  This is usually the case when the transport has
 *        filled the sending queue for its connection and will handle
 *        triggering the rds thread to continue the send when space becomes
 *        available.  Returning -EAGAIN tells the caller to retry the send
 *        immediately.  Returning -ENOMEM tells the caller to retry the send at
 *        some point in the future.
 *
 * @conn_shutdown: conn_shutdown stops traffic on the given connection.  Once
 *                 it returns the connection cannot call rds_recv_incoming().
 *                 This will only be called once after conn_connect returns
 *                 non-zero success.  The caller serializes this with
 *                 the send and connecting paths (xmit_* and conn_*).  The
 *                 transport is responsible for other serialization, including
 *                 rds_recv_incoming().  This is called in process context but
 *                 should try hard not to block.
 */

struct rds_transport {
	char			t_name[TRANSNAMSIZ];
	struct list_head	t_item;
	struct module		*t_owner;
	unsigned int		t_prefer_loopback:1,
				t_mp_capable:1;
	unsigned int		t_type;

	int (*laddr_check)(struct net *net, const struct in6_addr *addr,
			   __u32 scope_id);
	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
	void (*conn_free)(void *data);
	int (*conn_path_connect)(struct rds_conn_path *cp);
	void (*conn_path_shutdown)(struct rds_conn_path *conn);
	void (*xmit_path_prepare)(struct rds_conn_path *cp);
	void (*xmit_path_complete)(struct rds_conn_path *cp);
	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
		    unsigned int hdr_off, unsigned int sg, unsigned int off);
	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
	int (*recv_path)(struct rds_conn_path *cp);
	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
	void (*inc_free)(struct rds_incoming *inc);

	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event, bool isv6);
	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id, bool isv6);
	void (*cm_connect_complete)(struct rds_connection *conn,
				    struct rdma_cm_event *event);

	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
					unsigned int avail);
	void (*exit)(void);
	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
			struct rds_sock *rs, u32 *key_ret,
			struct rds_connection *conn);
	void (*sync_mr)(void *trans_private, int direction);
	void (*free_mr)(void *trans_private, int invalidate);
	void (*flush_mrs)(void);
	bool (*t_unloading)(struct rds_connection *conn);
};
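
/*
 * A hypothetical transport "foo" (a sketch only; all rds_foo_* names below
 * are placeholders) would fill in its hooks and register at module init
 * with rds_trans_register(), declared near the end of this header:
 *
 *	static struct rds_transport rds_foo_transport = {
 *		.t_name			= "foo",
 *		.t_owner		= THIS_MODULE,
 *		.laddr_check		= rds_foo_laddr_check,
 *		.conn_alloc		= rds_foo_conn_alloc,
 *		.conn_free		= rds_foo_conn_free,
 *		.conn_path_connect	= rds_foo_conn_path_connect,
 *		.conn_path_shutdown	= rds_foo_conn_path_shutdown,
 *		.xmit			= rds_foo_xmit,
 *		.recv_path		= rds_foo_recv_path,
 *		.inc_copy_to_user	= rds_foo_inc_copy_to_user,
 *		.inc_free		= rds_foo_inc_free,
 *	};
 *
 *	rds_trans_register(&rds_foo_transport);
 */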

/* Bind hash table key length.  It is the sum of the size of a struct
 * in6_addr, a scope_id and a port.
 */
#define RDS_BOUND_KEY_LEN \
	(sizeof(struct in6_addr) + sizeof(__u32) + sizeof(__be16))

struct rds_sock {
	struct sock		rs_sk;

	u64			rs_user_addr;
	u64			rs_user_bytes;

	/*
	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
	 * support.
	 */
	struct rhash_head	rs_bound_node;
	u8			rs_bound_key[RDS_BOUND_KEY_LEN];
	struct sockaddr_in6	rs_bound_sin6;
#define rs_bound_addr		rs_bound_sin6.sin6_addr
#define rs_bound_addr_v4	rs_bound_sin6.sin6_addr.s6_addr32[3]
#define rs_bound_port		rs_bound_sin6.sin6_port
#define rs_bound_scope_id	rs_bound_sin6.sin6_scope_id
	struct in6_addr		rs_conn_addr;
#define rs_conn_addr_v4		rs_conn_addr.s6_addr32[3]
	__be16			rs_conn_port;
	struct rds_transport    *rs_transport;

	/*
	 * rds_sendmsg caches the conn it used the last time around.
	 * This helps avoid costly lookups.
	 */
	struct rds_connection	*rs_conn;

	/* flag indicating we were congested or not */
	int			rs_congested;
	/* seen congestion (ENOBUFS) when sending? */
	int			rs_seen_congestion;

	/* rs_lock protects all these adjacent members before the newline */
	spinlock_t		rs_lock;
	struct list_head	rs_send_queue;
	u32			rs_snd_bytes;
	int			rs_rcv_bytes;
	struct list_head	rs_notify_queue;	/* currently used for failed RDMAs */

	/* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
	 * to decide whether the application should be woken up.
	 * If not set, we use rs_cong_track to find out whether a cong map
	 * update arrived.
	 */
	uint64_t		rs_cong_mask;
	uint64_t		rs_cong_notify;
	struct list_head	rs_cong_list;
	unsigned long		rs_cong_track;

	/*
	 * rs_recv_lock protects the receive queue, and is
	 * used to serialize with rds_release.
	 */
	rwlock_t		rs_recv_lock;
	struct list_head	rs_recv_queue;

	/* just for stats reporting */
	struct list_head	rs_item;

	/* these have their own lock */
	spinlock_t		rs_rdma_lock;
	struct rb_root		rs_rdma_keys;

	/* Socket options - in case there will be more */
	unsigned char		rs_recverr,
				rs_cong_monitor;
	u32			rs_hash_initval;

	/* Socket receive path trace points */
	u8			rs_rx_traces;
	u8			rs_rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX];
	struct rds_msg_zcopy_queue rs_zcookie_queue;
};

static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
{
	return container_of(sk, struct rds_sock, rs_sk);
}
static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
{
	return &rs->rs_sk;
}

/*
 * The core stack assigns sk_sndbuf and sk_rcvbuf to twice the requested
 * value to account for bookkeeping overhead.  RDS does not track that
 * overhead, so halve the values here and apply the limits to payload
 * bytes only.
 */
static inline int rds_sk_sndbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_sndbuf / 2;
}
static inline int rds_sk_rcvbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
}
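
/*
 * Worked example: if an application sets SO_SNDBUF to 131072, the core
 * stack stores 262144 in sk_sndbuf, and rds_sk_sndbuf() reports 131072,
 * the payload byte budget the application actually asked for.
 */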

struct rds_statistics {
	uint64_t	s_conn_reset;
	uint64_t	s_recv_drop_bad_checksum;
	uint64_t	s_recv_drop_old_seq;
	uint64_t	s_recv_drop_no_sock;
	uint64_t	s_recv_drop_dead_sock;
	uint64_t	s_recv_deliver_raced;
	uint64_t	s_recv_delivered;
	uint64_t	s_recv_queued;
	uint64_t	s_recv_immediate_retry;
	uint64_t	s_recv_delayed_retry;
	uint64_t	s_recv_ack_required;
	uint64_t	s_recv_rdma_bytes;
	uint64_t	s_recv_ping;
	uint64_t	s_send_queue_empty;
	uint64_t	s_send_queue_full;
	uint64_t	s_send_lock_contention;
	uint64_t	s_send_lock_queue_raced;
	uint64_t	s_send_immediate_retry;
	uint64_t	s_send_delayed_retry;
	uint64_t	s_send_drop_acked;
	uint64_t	s_send_ack_required;
	uint64_t	s_send_queued;
	uint64_t	s_send_rdma;
	uint64_t	s_send_rdma_bytes;
	uint64_t	s_send_pong;
	uint64_t	s_page_remainder_hit;
	uint64_t	s_page_remainder_miss;
	uint64_t	s_copy_to_user;
	uint64_t	s_copy_from_user;
	uint64_t	s_cong_update_queued;
	uint64_t	s_cong_update_received;
	uint64_t	s_cong_send_error;
	uint64_t	s_cong_send_blocked;
	uint64_t	s_recv_bytes_added_to_socket;
	uint64_t	s_recv_bytes_removed_from_socket;
};

/* af_rds.c */
void rds_sock_addref(struct rds_sock *rs);
void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
	wait_queue_head_t *waitq = sk_sleep(sk);

	if (!sock_flag(sk, SOCK_DEAD) && waitq)
		wake_up(waitq);
}
extern wait_queue_head_t rds_poll_waitq;


/* bind.c */
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
void rds_remove_bound(struct rds_sock *rs);
struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
				__u32 scope_id);
int rds_bind_lock_init(void);
void rds_bind_lock_destroy(void);

/* cong.c */
int rds_cong_get_maps(struct rds_connection *conn);
void rds_cong_add_conn(struct rds_connection *conn);
void rds_cong_remove_conn(struct rds_connection *conn);
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs);
void rds_cong_queue_updates(struct rds_cong_map *map);
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
int rds_cong_updated_since(unsigned long *recent);
void rds_cong_add_socket(struct rds_sock *);
void rds_cong_remove_socket(struct rds_sock *);
void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);

/* connection.c */
extern u32 rds_gen_num;
int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(struct net *net,
				       const struct in6_addr *laddr,
				       const struct in6_addr *faddr,
				       struct rds_transport *trans, gfp_t gfp,
				       int dev_if);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
						const struct in6_addr *laddr,
						const struct in6_addr *faddr,
						struct rds_transport *trans,
						gfp_t gfp, int dev_if);
void rds_conn_shutdown(struct rds_conn_path *cpath);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
void rds_conn_path_drop(struct rds_conn_path *cpath, bool destroy);
void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens,
			  int (*visitor)(struct rds_connection *, void *),
			  u64 *buffer,
			  size_t item_len);

__printf(2, 3)
void __rds_conn_path_error(struct rds_conn_path *cp, const char *, ...);
#define rds_conn_path_error(cp, fmt...) \
	__rds_conn_path_error(cp, KERN_WARNING "RDS: " fmt)

static inline int
rds_conn_path_transition(struct rds_conn_path *cp, int old, int new)
{
	return atomic_cmpxchg(&cp->cp_state, old, new) == old;
}

static inline int
rds_conn_transition(struct rds_connection *conn, int old, int new)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_transition(&conn->c_path[0], old, new);
}

static inline int
rds_conn_path_state(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state);
}

static inline int
rds_conn_state(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_state(&conn->c_path[0]);
}

static inline int
rds_conn_path_up(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_UP;
}

static inline int
rds_conn_up(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_up(&conn->c_path[0]);
}

static inline int
rds_conn_path_connecting(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING;
}

static inline int
rds_conn_connecting(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_connecting(&conn->c_path[0]);
}

/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents,
					  int *ret);
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
			       bool zcopy);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq);
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data, unsigned int len);
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_message_wait(struct rds_message *rm);
void rds_message_unmapped(struct rds_message *rm);
void rds_notify_msg_zcopy_purge(struct rds_msg_zcopy_queue *info);

static inline void rds_message_make_checksum(struct rds_header *hdr)
{
	hdr->h_csum = 0;
	hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
}

static inline int rds_message_verify_checksum(const struct rds_header *hdr)
{
	return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
}
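
/*
 * rds_message_make_checksum() zeroes h_csum and then folds the whole
 * header into it; rds_message_verify_checksum() accepts either an unset
 * (zero) checksum or one that verifies.  A sketch of a receive-side check:
 *
 *	if (!rds_message_verify_checksum(&inc->i_hdr)) {
 *		rds_stats_inc(s_recv_drop_bad_checksum);
 *		goto out;
 *	}
 */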


/* page.c */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp);
void rds_page_exit(void);

/* recv.c */
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  struct in6_addr *saddr);
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *conn,
		       struct in6_addr *saddr);
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, struct in6_addr *saddr,
		       struct in6_addr *daddr,
		       struct rds_incoming *inc, gfp_t gfp);
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip);
void rds6_inc_info_copy(struct rds_incoming *inc,
			struct rds_info_iterator *iter,
			struct in6_addr *saddr, struct in6_addr *daddr,
			int flip);

/* send.c */
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
void rds_send_path_reset(struct rds_conn_path *conn);
int rds_send_xmit(struct rds_conn_path *cp);
struct sockaddr_in;
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in6 *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked);
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked);
void rds_send_ping(struct rds_connection *conn, int cp_index);
int rds_send_pong(struct rds_conn_path *cp, __be16 dport);

/* rdma.c */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
int rds_rdma_extra_size(struct rds_rdma_args *args,
			struct rds_iov_vector *iov);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg,
			  struct rds_iov_vector *vec);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
void rds_atomic_free_op(struct rm_atomic_op *ao);
void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg);

void __rds_put_mr_final(struct rds_mr *mr);
static inline void rds_mr_put(struct rds_mr *mr)
{
	if (refcount_dec_and_test(&mr->r_refcount))
		__rds_put_mr_final(mr);
}

static inline bool rds_destroy_pending(struct rds_connection *conn)
{
	return !check_net(rds_conn_net(conn)) ||
	       (conn->c_trans->t_unloading && conn->c_trans->t_unloading(conn));
}

/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do {		\
	per_cpu(which, get_cpu()).member++;		\
	put_cpu();					\
} while (0)
#define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
#define rds_stats_add_which(which, member, count) do {		\
	per_cpu(which, get_cpu()).member += count;	\
	put_cpu();					\
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
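
/*
 * Example: rds_stats_inc(s_recv_ping) bumps this CPU's copy of
 * rds_stats.s_recv_ping with preemption disabled (get_cpu()/put_cpu()),
 * and rds_stats_add(s_recv_rdma_bytes, len) adds a byte count the same
 * way.
 */
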
int rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
			 uint64_t *values, const char *const *names,
			 size_t nr);

/* sysctl.c */
int rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
extern unsigned long rds_sysctl_sndbuf_max;
extern unsigned long rds_sysctl_reconnect_min_jiffies;
extern unsigned long rds_sysctl_reconnect_max_jiffies;
extern unsigned int  rds_sysctl_max_unacked_packets;
extern unsigned int  rds_sysctl_max_unacked_bytes;
extern unsigned int  rds_sysctl_ping_enable;
extern unsigned long rds_sysctl_trace_flags;
extern unsigned int  rds_sysctl_trace_level;

/* threads.c */
int rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
void rds_queue_reconnect(struct rds_conn_path *cp);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_path_complete(struct rds_conn_path *conn, int curr);
void rds_connect_complete(struct rds_connection *conn);
int rds_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2);

/* transport.c */
void rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(struct net *net,
					      const struct in6_addr *addr,
					      __u32 scope_id);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
				       unsigned int avail);
struct rds_transport *rds_trans_get(int t_type);
int rds_trans_init(void);
void rds_trans_exit(void);

#endif