#ifndef _RDS_RDS_H
#define _RDS_RDS_H

#include <net/sock.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <rdma/rdma_cm.h>
#include <linux/mutex.h>
#include <linux/rds.h>

#include "info.h"

/*
 * RDS Network protocol version
 */
#define RDS_PROTOCOL_3_0	0x0300
#define RDS_PROTOCOL_3_1	0x0301
#define RDS_PROTOCOL_VERSION	RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
#define RDS_PROTOCOL_MINOR(v)	((v) & 255)
#define RDS_PROTOCOL(maj, min)	(((maj) << 8) | (min))
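
/*
 * Illustrative only (not part of the original header): how the macros
 * above compose and decompose a 16-bit protocol version.
 *
 *	RDS_PROTOCOL(3, 1)		-> 0x0301
 *	RDS_PROTOCOL_MAJOR(0x0301)	-> 3
 *	RDS_PROTOCOL_MINOR(0x0301)	-> 1
 */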

/*
 * XXX randomly chosen, but at least seems to be unused:
 * #               18464-18768 Unassigned
 * We should do better.  We want a reserved port to discourage unpriv'ed
 * userspace from listening.
 */
#define RDS_PORT	18634

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif

#ifdef DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__, ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
static inline __printf(1, 2)
void rdsdebug(char *fmt, ...)
{
}
#endif

/* XXX is there one of these somewhere? */
#define ceil(x, y) \
	({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })
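
/*
 * Illustrative only: ceil() rounds an integer division up rather than
 * down, e.g. ceil(8192, 4096) -> 2 but ceil(8193, 4096) -> 3.
 */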

#define RDS_FRAG_SHIFT	12
#define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))

#define RDS_CONG_MAP_BYTES	(65536 / 8)
#define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)
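
/*
 * Illustrative only: the congestion map carries one bit per 16-bit
 * port, so 65536 bits = 8192 bytes, split across RDS_CONG_MAP_PAGES
 * pages.  A hypothetical lookup of the page and bit for a given port
 * (cong.c implements the real accessors) might look like:
 *
 *	unsigned long i   = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
 *	unsigned long off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 *	test_bit(off, (void *)map->m_page_addrs[i]);
 */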

struct rds_cong_map {
	struct rb_node		m_rb_node;
	__be32			m_addr;
	wait_queue_head_t	m_waitq;
	struct list_head	m_conn_list;
	unsigned long		m_page_addrs[RDS_CONG_MAP_PAGES];
};


/*
 * This is how we will track the connection state:
 * A connection is always in one of the following
 * states. Updates to the state are atomic and imply
 * a memory barrier.
 */
enum {
	RDS_CONN_DOWN = 0,
	RDS_CONN_CONNECTING,
	RDS_CONN_DISCONNECTING,
	RDS_CONN_UP,
	RDS_CONN_ERROR,
};

/* Bits for c_flags */
#define RDS_LL_SEND_FULL	0
#define RDS_RECONNECT_PENDING	1
#define RDS_IN_XMIT		2

struct rds_connection {
	struct hlist_node	c_hash_node;
	__be32			c_laddr;
	__be32			c_faddr;
	unsigned int		c_loopback:1;
	struct rds_connection	*c_passive;

	struct rds_cong_map	*c_lcong;
	struct rds_cong_map	*c_fcong;

	struct rds_message	*c_xmit_rm;
	unsigned long		c_xmit_sg;
	unsigned int		c_xmit_hdr_off;
	unsigned int		c_xmit_data_off;
	unsigned int		c_xmit_atomic_sent;
	unsigned int		c_xmit_rdma_sent;
	unsigned int		c_xmit_data_sent;

	spinlock_t		c_lock;		/* protect msg queues */
	u64			c_next_tx_seq;
	struct list_head	c_send_queue;
	struct list_head	c_retrans;

	u64			c_next_rx_seq;

	struct rds_transport	*c_trans;
	void			*c_transport_data;

	atomic_t		c_state;
	unsigned long		c_send_gen;
	unsigned long		c_flags;
	unsigned long		c_reconnect_jiffies;
	struct delayed_work	c_send_w;
	struct delayed_work	c_recv_w;
	struct delayed_work	c_conn_w;
	struct work_struct	c_down_w;
	struct mutex		c_cm_lock;	/* protect conn state & cm */
	wait_queue_head_t	c_waitq;

	struct list_head	c_map_item;
	unsigned long		c_map_queued;

	unsigned int		c_unacked_packets;
	unsigned int		c_unacked_bytes;

	/* Protocol version */
	unsigned int		c_version;
};

#define RDS_FLAG_CONG_BITMAP	0x01
#define RDS_FLAG_ACK_REQUIRED	0x02
#define RDS_FLAG_RETRANSMITTED	0x04
#define RDS_MAX_ADV_CREDIT	255

/*
 * Maximum space available for extension headers.
 */
#define RDS_HEADER_EXT_SPACE	16

struct rds_header {
	__be64	h_sequence;
	__be64	h_ack;
	__be32	h_len;
	__be16	h_sport;
	__be16	h_dport;
	u8	h_flags;
	u8	h_credit;
	u8	h_padding[4];
	__sum16	h_csum;

	u8	h_exthdr[RDS_HEADER_EXT_SPACE];
};

/*
 * Reserved - indicates end of extensions
 */
#define RDS_EXTHDR_NONE		0

/*
 * This extension header is included in the very
 * first message that is sent on a new connection,
 * and identifies the protocol level. This will help
 * rolling updates if a future change requires breaking
 * the protocol.
 * NB: This is no longer true for IB, where we do a version
 * negotiation during the connection setup phase (protocol
 * version information is included in the RDMA CM private data).
 */
#define RDS_EXTHDR_VERSION	1
struct rds_ext_header_version {
	__be32			h_version;
};

/*
 * This extension header is included in the RDS message
 * chasing an RDMA operation.
 */
#define RDS_EXTHDR_RDMA		2
struct rds_ext_header_rdma {
	__be32			h_rdma_rkey;
};

/*
 * This extension header tells the peer about the
 * destination <R_Key,offset> of the requested RDMA
 * operation.
 */
#define RDS_EXTHDR_RDMA_DEST	3
struct rds_ext_header_rdma_dest {
	__be32			h_rdma_rkey;
	__be32			h_rdma_offset;
};

#define __RDS_EXTHDR_MAX	16 /* for now */

struct rds_incoming {
	atomic_t		i_refcount;
	struct list_head	i_item;
	struct rds_connection	*i_conn;
	struct rds_header	i_hdr;
	unsigned long		i_rx_jiffies;
	__be32			i_saddr;

	rds_rdma_cookie_t	i_rdma_cookie;
};

struct rds_mr {
	struct rb_node		r_rb_node;
	atomic_t		r_refcount;
	u32			r_key;

	/* A copy of the creation flags */
	unsigned int		r_use_once:1;
	unsigned int		r_invalidate:1;
	unsigned int		r_write:1;

	/* This is for RDS_MR_DEAD.
	 * It would be nice & consistent to make this part of the above
	 * bit field here, but we need to use test_and_set_bit.
	 */
	unsigned long		r_state;
	struct rds_sock		*r_sock; /* back pointer to the socket that owns us */
	struct rds_transport	*r_trans;
	void			*r_trans_private;
};

/* Flags for mr->r_state */
#define RDS_MR_DEAD		0

static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
{
	return r_key | (((u64) offset) << 32);
}

static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
{
	return cookie;
}

static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
{
	return cookie >> 32;
}
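
/*
 * Illustrative only: the helpers above round-trip an <R_Key, offset>
 * pair through the 64-bit wire cookie.
 *
 *	rds_rdma_cookie_t c = rds_rdma_make_cookie(0xdead, 0x1000);
 *	rds_rdma_cookie_key(c)    -> 0xdead
 *	rds_rdma_cookie_offset(c) -> 0x1000
 */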

/* atomic operation types */
#define RDS_ATOMIC_TYPE_CSWP		0
#define RDS_ATOMIC_TYPE_FADD		1

/*
 * m_sock_item and m_conn_item are on lists that are serialized under
 * conn->c_lock.  m_sock_item has additional meaning in that once it is empty
 * the message will not be put back on the retransmit list after being sent.
 * Messages that are canceled while being sent rely on this.
 *
 * m_inc is used by loopback so that it can pass an incoming message straight
 * back up into the rx path.  It embeds a wire header which is also used by
 * the send path, which is kind of awkward.
 *
 * m_sock_item indicates the message's presence on a socket's send or receive
 * queue.  m_rs will point to that socket.
 *
 * m_daddr is used by cancellation to prune messages to a given destination.
 *
 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
 * nesting.  As paths iterate over messages on a sock, or conn, they must
 * also lock the conn, or sock, to remove the message from those lists too.
 * Testing the flag to determine if the message is still on the lists lets
 * us avoid testing the list_head directly.  That means each path can use
 * the message's list_head to keep it on a local list while juggling locks
 * without confusing the other path.
 *
 * m_ack_seq is an optional field set by transports that need a different
 * sequence number range to invalidate.  They can use this in a callback
 * that they pass to rds_send_drop_acked() to see if each message has been
 * acked.  The HAS_ACK_SEQ flag can be used to detect messages which haven't
 * had ack_seq set yet.  (An illustrative callback sketch follows the flag
 * definitions below.)
 */
#define RDS_MSG_ON_SOCK		1
#define RDS_MSG_ON_CONN		2
#define RDS_MSG_HAS_ACK_SEQ	3
#define RDS_MSG_ACK_REQUIRED	4
#define RDS_MSG_RETRANSMITTED	5
#define RDS_MSG_MAPPED		6
#define RDS_MSG_PAGEVEC		7
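
/*
 * Illustrative only: a hypothetical is_acked callback that a transport
 * setting m_ack_seq could pass to rds_send_drop_acked() (declared
 * below), falling back to the wire sequence number when HAS_ACK_SEQ is
 * not set.
 *
 *	static int example_is_acked(struct rds_message *rm, uint64_t ack)
 *	{
 *		if (test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
 *			return rm->m_ack_seq <= ack;
 *		return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
 *	}
 */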

struct rds_message {
	atomic_t		m_refcount;
	struct list_head	m_sock_item;
	struct list_head	m_conn_item;
	struct rds_incoming	m_inc;
	u64			m_ack_seq;
	__be32			m_daddr;
	unsigned long		m_flags;

	/* Never access m_rs without holding m_rs_lock.
	 * Lock nesting is
	 *  rm->m_rs_lock
	 *   -> rs->rs_lock
	 */
	spinlock_t		m_rs_lock;
	wait_queue_head_t	m_flush_wait;

	struct rds_sock		*m_rs;

	/* cookie to send to remote, in rds header */
	rds_rdma_cookie_t	m_rdma_cookie;

	unsigned int		m_used_sgs;
	unsigned int		m_total_sgs;

	void			*m_final_op;

	struct {
		struct rm_atomic_op {
			int			op_type;
			union {
				struct {
					uint64_t	compare;
					uint64_t	swap;
					uint64_t	compare_mask;
					uint64_t	swap_mask;
				} op_m_cswp;
				struct {
					uint64_t	add;
					uint64_t	nocarry_mask;
				} op_m_fadd;
			};

			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} atomic;
		struct rm_rdma_op {
			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_write:1;
			unsigned int		op_fence:1;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			unsigned int		op_bytes;
			unsigned int		op_nents;
			unsigned int		op_count;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} rdma;
		struct rm_data_op {
			unsigned int		op_active:1;
			unsigned int		op_nents;
			unsigned int		op_count;
			struct scatterlist	*op_sg;
		} data;
	};
};

/*
 * The RDS notifier is used (optionally) to tell the application about
 * completed RDMA operations. Rather than keeping the whole rds message
 * around on the queue, we allocate a small notifier that is put on the
 * socket's notifier_list. Notifications are delivered to the application
 * through control messages.
 */
struct rds_notifier {
	struct list_head	n_list;
	uint64_t		n_user_token;
	int			n_status;
};

/**
 * struct rds_transport - transport-specific behavioural hooks
 *
 * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
 *        part of a message.  The caller serializes on the send_sem so this
 *        doesn't need to be reentrant for a given conn.  The header must be
 *        sent before the data payload.  .xmit must be prepared to send a
 *        message with no data payload.  .xmit should return the number of
 *        bytes that were sent down the connection, including header bytes.
 *        Returning 0 tells the caller that it doesn't need to perform any
 *        additional work now.  This is usually the case when the transport has
 *        filled the sending queue for its connection and will handle
 *        triggering the rds thread to continue the send when space becomes
 *        available.  Returning -EAGAIN tells the caller to retry the send
 *        immediately.  Returning -ENOMEM tells the caller to retry the send at
 *        some point in the future.
 *
 * @conn_shutdown: conn_shutdown stops traffic on the given connection.  Once
 *                 it returns the connection cannot call rds_recv_incoming().
 *                 This will only be called once after conn_connect returns
 *                 non-zero success.  The caller serializes this with the send
 *                 and connecting paths (xmit_* and conn_*).  The transport is
 *                 responsible for other serialization, including
 *                 rds_recv_incoming().  This is called in process context but
 *                 should try hard not to block.
 */
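
/*
 * Illustrative only (not in the original header): a hypothetical
 * fragment of a send loop acting on the .xmit return values described
 * above.
 *
 *	ret = conn->c_trans->xmit(conn, rm, hdr_off, sg, off);
 *	if (ret == 0)
 *		break;			(transport will reschedule the send)
 *	if (ret == -EAGAIN)
 *		continue;		(retry immediately)
 *	if (ret == -ENOMEM)
 *		goto requeue;		(retry later via the work queue)
 *	(ret > 0: advance header/data offsets by ret bytes and loop)
 */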

#define RDS_TRANS_IB	0
#define RDS_TRANS_IWARP	1
#define RDS_TRANS_TCP	2
#define RDS_TRANS_COUNT	3

struct rds_transport {
	char			t_name[TRANSNAMSIZ];
	struct list_head	t_item;
	struct module		*t_owner;
	unsigned int		t_prefer_loopback:1;
	unsigned int		t_type;

	int (*laddr_check)(__be32 addr);
	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
	void (*conn_free)(void *data);
	int (*conn_connect)(struct rds_connection *conn);
	void (*conn_shutdown)(struct rds_connection *conn);
	void (*xmit_prepare)(struct rds_connection *conn);
	void (*xmit_complete)(struct rds_connection *conn);
	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
		    unsigned int hdr_off, unsigned int sg, unsigned int off);
	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
	int (*recv)(struct rds_connection *conn);
	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
	void (*inc_free)(struct rds_incoming *inc);

	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event);
	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id);
	void (*cm_connect_complete)(struct rds_connection *conn,
				    struct rdma_cm_event *event);

	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
					unsigned int avail);
	void (*exit)(void);
	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
			struct rds_sock *rs, u32 *key_ret);
	void (*sync_mr)(void *trans_private, int direction);
	void (*free_mr)(void *trans_private, int invalidate);
	void (*flush_mrs)(void);
};

struct rds_sock {
	struct sock		rs_sk;

	u64			rs_user_addr;
	u64			rs_user_bytes;

	/*
	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
	 * support.
	 */
	struct hlist_node	rs_bound_node;
	__be32			rs_bound_addr;
	__be32			rs_conn_addr;
	__be16			rs_bound_port;
	__be16			rs_conn_port;
	struct rds_transport    *rs_transport;

	/*
	 * rds_sendmsg caches the conn it used the last time around.
	 * This helps avoid costly lookups.
	 */
	struct rds_connection	*rs_conn;

	/* flag indicating we were congested or not */
	int			rs_congested;
	/* seen congestion (ENOBUFS) when sending? */
	int			rs_seen_congestion;

	/* rs_lock protects all these adjacent members before the newline */
	spinlock_t		rs_lock;
	struct list_head	rs_send_queue;
	u32			rs_snd_bytes;
	int			rs_rcv_bytes;
	struct list_head	rs_notify_queue;	/* currently used for failed RDMAs */

	/* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
	 * to decide whether the application should be woken up.
	 * If not set, we use rs_cong_track to find out whether a cong map
	 * update arrived.
	 */
	uint64_t		rs_cong_mask;
	uint64_t		rs_cong_notify;
	struct list_head	rs_cong_list;
	unsigned long		rs_cong_track;

	/*
	 * rs_recv_lock protects the receive queue, and is
	 * used to serialize with rds_release.
	 */
	rwlock_t		rs_recv_lock;
	struct list_head	rs_recv_queue;

	/* just for stats reporting */
	struct list_head	rs_item;

	/* these have their own lock */
	spinlock_t		rs_rdma_lock;
	struct rb_root		rs_rdma_keys;

	/* Socket options - in case there will be more */
	unsigned char		rs_recverr,
				rs_cong_monitor;
};

static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
{
	return container_of(sk, struct rds_sock, rs_sk);
}
static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
{
	return &rs->rs_sk;
}

/*
 * The stack assigns sk_sndbuf and sk_rcvbuf to twice the specified value
 * to account for overhead.  We don't account for overhead, we just apply
 * the number of payload bytes to the specified value.
 */
static inline int rds_sk_sndbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_sndbuf / 2;
}
static inline int rds_sk_rcvbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
}
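
/*
 * Illustrative only: with the halving above, a process that sets
 * SO_SNDBUF to 8192 (which the socket core doubles to sk_sndbuf ==
 * 16384) sees rds_sk_sndbuf() == 8192 payload bytes, matching the
 * value it asked for.
 */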

struct rds_statistics {
	uint64_t	s_conn_reset;
	uint64_t	s_recv_drop_bad_checksum;
	uint64_t	s_recv_drop_old_seq;
	uint64_t	s_recv_drop_no_sock;
	uint64_t	s_recv_drop_dead_sock;
	uint64_t	s_recv_deliver_raced;
	uint64_t	s_recv_delivered;
	uint64_t	s_recv_queued;
	uint64_t	s_recv_immediate_retry;
	uint64_t	s_recv_delayed_retry;
	uint64_t	s_recv_ack_required;
	uint64_t	s_recv_rdma_bytes;
	uint64_t	s_recv_ping;
	uint64_t	s_send_queue_empty;
	uint64_t	s_send_queue_full;
	uint64_t	s_send_lock_contention;
	uint64_t	s_send_lock_queue_raced;
	uint64_t	s_send_immediate_retry;
	uint64_t	s_send_delayed_retry;
	uint64_t	s_send_drop_acked;
	uint64_t	s_send_ack_required;
	uint64_t	s_send_queued;
	uint64_t	s_send_rdma;
	uint64_t	s_send_rdma_bytes;
	uint64_t	s_send_pong;
	uint64_t	s_page_remainder_hit;
	uint64_t	s_page_remainder_miss;
	uint64_t	s_copy_to_user;
	uint64_t	s_copy_from_user;
	uint64_t	s_cong_update_queued;
	uint64_t	s_cong_update_received;
	uint64_t	s_cong_send_error;
	uint64_t	s_cong_send_blocked;
};

/* af_rds.c */
char *rds_str_array(char **array, size_t elements, size_t index);
void rds_sock_addref(struct rds_sock *rs);
void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
	wait_queue_head_t *waitq = sk_sleep(sk);

	if (!sock_flag(sk, SOCK_DEAD) && waitq)
		wake_up(waitq);
}
extern wait_queue_head_t rds_poll_waitq;


/* bind.c */
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
void rds_remove_bound(struct rds_sock *rs);
struct rds_sock *rds_find_bound(__be32 addr, __be16 port);

/* cong.c */
int rds_cong_get_maps(struct rds_connection *conn);
void rds_cong_add_conn(struct rds_connection *conn);
void rds_cong_remove_conn(struct rds_connection *conn);
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs);
void rds_cong_queue_updates(struct rds_cong_map *map);
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask);
int rds_cong_updated_since(unsigned long *recent);
void rds_cong_add_socket(struct rds_sock *);
void rds_cong_remove_socket(struct rds_sock *);
void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);

/* conn.c */
int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp);
struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
			       struct rds_transport *trans, gfp_t gfp);
void rds_conn_shutdown(struct rds_connection *conn);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens,
			  int (*visitor)(struct rds_connection *, void *),
			  size_t item_len);
__printf(2, 3)
void __rds_conn_error(struct rds_connection *conn, const char *, ...);
#define rds_conn_error(conn, fmt...) \
	__rds_conn_error(conn, KERN_WARNING "RDS: " fmt)

static inline int
rds_conn_transition(struct rds_connection *conn, int old, int new)
{
	return atomic_cmpxchg(&conn->c_state, old, new) == old;
}
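
/*
 * Illustrative only: callers use the cmpxchg-based helper above to
 * claim a state change exactly once, e.g. when starting a connect
 * attempt:
 *
 *	if (rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING))
 *		(we won the race; this path owns the connect attempt)
 */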

static inline int
rds_conn_state(struct rds_connection *conn)
{
	return atomic_read(&conn->c_state);
}

static inline int
rds_conn_up(struct rds_connection *conn)
{
	return atomic_read(&conn->c_state) == RDS_CONN_UP;
}

static inline int
rds_conn_connecting(struct rds_connection *conn)
{
	return atomic_read(&conn->c_state) == RDS_CONN_CONNECTING;
}

/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq);
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data, unsigned int len);
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_message_wait(struct rds_message *rm);
void rds_message_unmapped(struct rds_message *rm);

static inline void rds_message_make_checksum(struct rds_header *hdr)
{
	hdr->h_csum = 0;
	hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
}

static inline int rds_message_verify_checksum(const struct rds_header *hdr)
{
	return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
}
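
/*
 * Illustrative only: the two helpers pair up across send and receive.
 * A sender fills in the header and then computes the checksum over it
 * (with h_csum zeroed first):
 *
 *	rds_message_make_checksum(&rm->m_inc.i_hdr);
 *
 * A receiver accepts the header if the checksum verifies, or if it is
 * zero, meaning the peer sent no checksum:
 *
 *	if (!rds_message_verify_checksum(&inc->i_hdr))
 *		(drop and count s_recv_drop_bad_checksum)
 */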


/* page.c */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp);
int rds_page_copy_user(struct page *page, unsigned long offset,
		       void __user *ptr, unsigned long bytes,
		       int to_user);
#define rds_page_copy_to_user(page, offset, ptr, bytes) \
	rds_page_copy_user(page, offset, ptr, bytes, 1)
#define rds_page_copy_from_user(page, offset, ptr, bytes) \
	rds_page_copy_user(page, offset, ptr, bytes, 0)
void rds_page_exit(void);

/* recv.c */
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  __be32 saddr);
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
		       struct rds_incoming *inc, gfp_t gfp);
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip);

/* send.c */
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
void rds_send_reset(struct rds_connection *conn);
int rds_send_xmit(struct rds_connection *conn);
struct sockaddr_in;
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked);
int rds_send_pong(struct rds_connection *conn, __be16 dport);
struct rds_message *rds_send_get_message(struct rds_connection *,
					 struct rm_rdma_op *);

/* rdma.c */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
int rds_rdma_extra_size(struct rds_rdma_args *args);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
void rds_atomic_free_op(struct rm_atomic_op *ao);
void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg);

void __rds_put_mr_final(struct rds_mr *mr);
static inline void rds_mr_put(struct rds_mr *mr)
{
	if (atomic_dec_and_test(&mr->r_refcount))
		__rds_put_mr_final(mr);
}

/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do {		\
	per_cpu(which, get_cpu()).member++;		\
	put_cpu();					\
} while (0)
#define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
#define rds_stats_add_which(which, member, count) do {		\
	per_cpu(which, get_cpu()).member += count;	\
	put_cpu();					\
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
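
/*
 * Illustrative only: the macros above bump a field of the per-CPU
 * struct rds_statistics with preemption disabled around the access
 * (get_cpu()/put_cpu()).
 *
 *	rds_stats_inc(s_recv_delivered);
 *	rds_stats_add(s_copy_to_user, nbytes);
 */
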
int rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
			 uint64_t *values, const char *const *names,
			 size_t nr);

/* sysctl.c */
int rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
extern unsigned long rds_sysctl_sndbuf_max;
extern unsigned long rds_sysctl_reconnect_min_jiffies;
extern unsigned long rds_sysctl_reconnect_max_jiffies;
extern unsigned int  rds_sysctl_max_unacked_packets;
extern unsigned int  rds_sysctl_max_unacked_bytes;
extern unsigned int  rds_sysctl_ping_enable;
extern unsigned long rds_sysctl_trace_flags;
extern unsigned int  rds_sysctl_trace_level;

/* threads.c */
int rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
void rds_queue_reconnect(struct rds_connection *conn);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_complete(struct rds_connection *conn);

/* transport.c */
int rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(__be32 addr);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
				       unsigned int avail);
int rds_trans_init(void);
void rds_trans_exit(void);

#endif