#ifndef _RDS_RDS_H
#define _RDS_RDS_H

#include <net/sock.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <rdma/rdma_cm.h>
#include <linux/mutex.h>
#include <linux/rds.h>

#include "info.h"

/*
 * RDS Network protocol version
 */
#define RDS_PROTOCOL_3_0	0x0300
#define RDS_PROTOCOL_3_1	0x0301
#define RDS_PROTOCOL_VERSION	RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
#define RDS_PROTOCOL_MINOR(v)	((v) & 255)
#define RDS_PROTOCOL(maj, min)	(((maj) << 8) | (min))
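
/*
 * Worked example (illustration only, not part of the original header):
 * RDS_PROTOCOL(3, 1) == 0x0301 == RDS_PROTOCOL_3_1, and going the other
 * way, RDS_PROTOCOL_MAJOR(0x0301) == 3 and RDS_PROTOCOL_MINOR(0x0301) == 1.
 */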

/*
 * XXX randomly chosen, but at least seems to be unused:
 * #               18464-18768 Unassigned
 * We should do better.  We want a reserved port to discourage unpriv'ed
 * userspace from listening.
 */
#define RDS_PORT	18634

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif

#ifdef DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__, ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
static inline __printf(1, 2)
void rdsdebug(char *fmt, ...)
{
}
#endif

/* XXX is there one of these somewhere? */
#define ceil(x, y) \
	({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })
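
/*
 * Worked example (illustration only, not part of the original header):
 * ceil(10, 4) evaluates to (10 + 4 - 1) / 4 == 3, i.e. integer division
 * rounded up; ceil(8, 4) == 2 exactly.
 */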

#define RDS_FRAG_SHIFT	12
#define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))

#define RDS_CONG_MAP_BYTES	(65536 / 8)
#define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)
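
/*
 * Worked arithmetic (illustration only, not part of the original header):
 * one bit per 16-bit port gives 65536 / 8 == 8192 bytes.  With 4 KiB pages
 * that is RDS_CONG_MAP_PAGES == 2, each page holding
 * RDS_CONG_MAP_PAGE_BITS == 32768 of the port bits.
 */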

struct rds_cong_map {
	struct rb_node		m_rb_node;
	__be32			m_addr;
	wait_queue_head_t	m_waitq;
	struct list_head	m_conn_list;
	unsigned long		m_page_addrs[RDS_CONG_MAP_PAGES];
};


/*
 * This is how we will track the connection state:
 * A connection is always in one of the following
 * states. Updates to the state are atomic and imply
 * a memory barrier.
 */
enum {
	RDS_CONN_DOWN = 0,
	RDS_CONN_CONNECTING,
	RDS_CONN_DISCONNECTING,
	RDS_CONN_UP,
	RDS_CONN_ERROR,
};

/* Bits for c_flags */
#define RDS_LL_SEND_FULL	0
#define RDS_RECONNECT_PENDING	1
#define RDS_IN_XMIT		2

struct rds_connection {
	struct hlist_node	c_hash_node;
	__be32			c_laddr;
	__be32			c_faddr;
	unsigned int		c_loopback:1;
	struct rds_connection	*c_passive;

	struct rds_cong_map	*c_lcong;
	struct rds_cong_map	*c_fcong;

	struct rds_message	*c_xmit_rm;
	unsigned long		c_xmit_sg;
	unsigned int		c_xmit_hdr_off;
	unsigned int		c_xmit_data_off;
	unsigned int		c_xmit_atomic_sent;
	unsigned int		c_xmit_rdma_sent;
	unsigned int		c_xmit_data_sent;

	spinlock_t		c_lock;		/* protect msg queues */
	u64			c_next_tx_seq;
	struct list_head	c_send_queue;
	struct list_head	c_retrans;

	u64			c_next_rx_seq;

	struct rds_transport	*c_trans;
	void			*c_transport_data;

	atomic_t		c_state;
	unsigned long		c_send_gen;
	unsigned long		c_flags;
	unsigned long		c_reconnect_jiffies;
	struct delayed_work	c_send_w;
	struct delayed_work	c_recv_w;
	struct delayed_work	c_conn_w;
	struct work_struct	c_down_w;
	struct mutex		c_cm_lock;	/* protect conn state & cm */
	wait_queue_head_t	c_waitq;

	struct list_head	c_map_item;
	unsigned long		c_map_queued;

	unsigned int		c_unacked_packets;
	unsigned int		c_unacked_bytes;

	/* Protocol version */
	unsigned int		c_version;
};

#define RDS_FLAG_CONG_BITMAP	0x01
#define RDS_FLAG_ACK_REQUIRED	0x02
#define RDS_FLAG_RETRANSMITTED	0x04
#define RDS_MAX_ADV_CREDIT	255

/*
 * Maximum space available for extension headers.
 */
#define RDS_HEADER_EXT_SPACE	16

struct rds_header {
	__be64	h_sequence;
	__be64	h_ack;
	__be32	h_len;
	__be16	h_sport;
	__be16	h_dport;
	u8	h_flags;
	u8	h_credit;
	u8	h_padding[4];
	__sum16	h_csum;

	u8	h_exthdr[RDS_HEADER_EXT_SPACE];
};

/*
 * Reserved - indicates end of extensions
 */
#define RDS_EXTHDR_NONE		0

/*
 * This extension header is included in the very
 * first message that is sent on a new connection,
 * and identifies the protocol level. This will help
 * rolling updates if a future change requires breaking
 * the protocol.
 * NB: This is no longer true for IB, where we do a version
 * negotiation during the connection setup phase (protocol
 * version information is included in the RDMA CM private data).
 */
#define RDS_EXTHDR_VERSION	1
struct rds_ext_header_version {
	__be32			h_version;
};

/*
 * This extension header is included in the RDS message
 * chasing an RDMA operation.
 */
#define RDS_EXTHDR_RDMA		2
struct rds_ext_header_rdma {
	__be32			h_rdma_rkey;
};

/*
 * This extension header tells the peer about the
 * destination <R_Key,offset> of the requested RDMA
 * operation.
 */
#define RDS_EXTHDR_RDMA_DEST	3
struct rds_ext_header_rdma_dest {
	__be32			h_rdma_rkey;
	__be32			h_rdma_offset;
};

#define __RDS_EXTHDR_MAX	16 /* for now */

struct rds_incoming {
	atomic_t		i_refcount;
	struct list_head	i_item;
	struct rds_connection	*i_conn;
	struct rds_header	i_hdr;
	unsigned long		i_rx_jiffies;
	__be32			i_saddr;

	rds_rdma_cookie_t	i_rdma_cookie;
};

struct rds_mr {
	struct rb_node		r_rb_node;
	atomic_t		r_refcount;
	u32			r_key;

	/* A copy of the creation flags */
	unsigned int		r_use_once:1;
	unsigned int		r_invalidate:1;
	unsigned int		r_write:1;

	/* This is for RDS_MR_DEAD.
	 * It would be nice & consistent to make this part of the above
	 * bit field here, but we need to use test_and_set_bit.
	 */
	unsigned long		r_state;
	struct rds_sock		*r_sock; /* back pointer to the socket that owns us */
	struct rds_transport	*r_trans;
	void			*r_trans_private;
};

/* Flags for mr->r_state */
#define RDS_MR_DEAD		0

static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
{
	return r_key | (((u64) offset) << 32);
}

static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
{
	return cookie;
}

static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
{
	return cookie >> 32;
}
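
/*
 * Illustration only, not part of the original header: a hedged sketch
 * (hypothetical name) showing that a cookie built by rds_rdma_make_cookie()
 * round-trips through the two accessors above; the key lives in the low
 * 32 bits and the offset in the high 32 bits.
 */
static inline int rds_example_cookie_roundtrip(u32 r_key, u32 offset)
{
	rds_rdma_cookie_t cookie = rds_rdma_make_cookie(r_key, offset);

	return rds_rdma_cookie_key(cookie) == r_key &&
	       rds_rdma_cookie_offset(cookie) == offset;
}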

/* atomic operation types */
#define RDS_ATOMIC_TYPE_CSWP		0
#define RDS_ATOMIC_TYPE_FADD		1

/*
 * m_sock_item and m_conn_item are on lists that are serialized under
 * conn->c_lock.  m_sock_item has additional meaning in that once it is empty
 * the message will not be put back on the retransmit list after being sent.
 * Messages that are canceled while being sent rely on this.
 *
 * m_inc is used by loopback so that it can pass an incoming message straight
 * back up into the rx path.  It embeds a wire header which is also used by
 * the send path, which is kind of awkward.
 *
 * m_sock_item indicates the message's presence on a socket's send or receive
 * queue.  m_rs will point to that socket.
 *
 * m_daddr is used by cancellation to prune messages to a given destination.
 *
 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
 * nesting.  As paths iterate over messages on a sock, or conn, they must
 * also lock the conn, or sock, to remove the message from those lists too.
 * Testing the flag to determine if the message is still on the lists lets
 * us avoid testing the list_head directly.  That means each path can use
 * the message's list_head to keep it on a local list while juggling locks
 * without confusing the other path.  (An illustrative sketch of this flag
 * test follows struct rds_message below.)
 *
 * m_ack_seq is an optional field set by transports that need a different
 * sequence number range to invalidate.  They can use this in a callback
 * that they pass to rds_send_drop_acked() to see if each message has been
 * acked.  The HAS_ACK_SEQ flag can be used to detect messages which haven't
 * had ack_seq set yet.
 */
#define RDS_MSG_ON_SOCK		1
#define RDS_MSG_ON_CONN		2
#define RDS_MSG_HAS_ACK_SEQ	3
#define RDS_MSG_ACK_REQUIRED	4
#define RDS_MSG_RETRANSMITTED	5
#define RDS_MSG_MAPPED		6
#define RDS_MSG_PAGEVEC		7

struct rds_message {
	atomic_t		m_refcount;
	struct list_head	m_sock_item;
	struct list_head	m_conn_item;
	struct rds_incoming	m_inc;
	u64			m_ack_seq;
	__be32			m_daddr;
	unsigned long		m_flags;

	/* Never access m_rs without holding m_rs_lock.
	 * Lock nesting is
	 *  rm->m_rs_lock
	 *   -> rs->rs_lock
	 */
	spinlock_t		m_rs_lock;
	wait_queue_head_t	m_flush_wait;

	struct rds_sock		*m_rs;

	/* cookie to send to remote, in rds header */
	rds_rdma_cookie_t	m_rdma_cookie;

	unsigned int		m_used_sgs;
	unsigned int		m_total_sgs;

	void			*m_final_op;

	struct {
		struct rm_atomic_op {
			int			op_type;
			union {
				struct {
					uint64_t	compare;
					uint64_t	swap;
					uint64_t	compare_mask;
					uint64_t	swap_mask;
				} op_m_cswp;
				struct {
					uint64_t	add;
					uint64_t	nocarry_mask;
				} op_m_fadd;
			};

			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} atomic;
		struct rm_rdma_op {
			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_write:1;
			unsigned int		op_fence:1;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			unsigned int		op_bytes;
			unsigned int		op_nents;
			unsigned int		op_count;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} rdma;
		struct rm_data_op {
			unsigned int		op_active:1;
			unsigned int		op_nents;
			unsigned int		op_count;
			unsigned int		op_dmasg;
			unsigned int		op_dmaoff;
			struct scatterlist	*op_sg;
		} data;
	};
};
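
/*
 * Illustration only, not part of the original header: the flag test
 * described in the comment above the RDS_MSG_* bits; a path checks
 * RDS_MSG_ON_CONN instead of probing m_conn_item directly, leaving the
 * list_head free for local use while locks are juggled.  The function
 * name is hypothetical.
 */
static inline int rds_example_msg_on_conn(struct rds_message *rm)
{
	return test_bit(RDS_MSG_ON_CONN, &rm->m_flags);
}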

/*
 * The RDS notifier is used (optionally) to tell the application about
 * completed RDMA operations. Rather than keeping the whole rds message
 * around on the queue, we allocate a small notifier that is put on the
 * socket's notifier_list. Notifications are delivered to the application
 * through control messages.
 */
struct rds_notifier {
	struct list_head	n_list;
	uint64_t		n_user_token;
	int			n_status;
};
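
/*
 * Illustration only, not part of the original header: a hedged sketch
 * (hypothetical name) of preparing a notifier before a completion path
 * queues it on the socket's notifier list for delivery as a control
 * message.
 */
static inline void rds_example_init_notifier(struct rds_notifier *notifier,
					     uint64_t user_token)
{
	INIT_LIST_HEAD(&notifier->n_list);
	notifier->n_user_token = user_token;	/* echoed back to the app */
	notifier->n_status = 0;			/* i.e. RDS_RDMA_SUCCESS */
}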

/**
 * struct rds_transport -  transport-specific behavioural hooks
 *
 * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
 *        part of a message.  The caller serializes on the send_sem so this
 *        doesn't need to be reentrant for a given conn.  The header must be
 *        sent before the data payload.  .xmit must be prepared to send a
 *        message with no data payload.  .xmit should return the number of
 *        bytes that were sent down the connection, including header bytes.
 *        Returning 0 tells the caller that it doesn't need to perform any
 *        additional work now.  This is usually the case when the transport has
 *        filled the sending queue for its connection and will handle
 *        triggering the rds thread to continue the send when space becomes
 *        available.  Returning -EAGAIN tells the caller to retry the send
 *        immediately.  Returning -ENOMEM tells the caller to retry the send at
 *        some point in the future.  (An illustrative return-value sketch
 *        follows the struct below.)
 *
 * @conn_shutdown: conn_shutdown stops traffic on the given connection.  Once
 *                 it returns, the connection can not call rds_recv_incoming().
 *                 This will only be called once after conn_connect returns
 *                 non-zero success.  The caller serializes this with the send
 *                 and connecting paths (xmit_* and conn_*).  The transport is
 *                 responsible for other serialization, including
 *                 rds_recv_incoming().  This is called in process context but
 *                 should try hard not to block.
 */

struct rds_transport {
	char			t_name[TRANSNAMSIZ];
	struct list_head	t_item;
	struct module		*t_owner;
	unsigned int		t_prefer_loopback:1;
	unsigned int		t_type;

	int (*laddr_check)(__be32 addr);
	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
	void (*conn_free)(void *data);
	int (*conn_connect)(struct rds_connection *conn);
	void (*conn_shutdown)(struct rds_connection *conn);
	void (*xmit_prepare)(struct rds_connection *conn);
	void (*xmit_complete)(struct rds_connection *conn);
	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
		    unsigned int hdr_off, unsigned int sg, unsigned int off);
	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
	int (*recv)(struct rds_connection *conn);
	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
	void (*inc_free)(struct rds_incoming *inc);

	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event);
	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id);
	void (*cm_connect_complete)(struct rds_connection *conn,
				    struct rdma_cm_event *event);

	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
					unsigned int avail);
	void (*exit)(void);
	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
			struct rds_sock *rs, u32 *key_ret);
	void (*sync_mr)(void *trans_private, int direction);
	void (*free_mr)(void *trans_private, int invalidate);
	void (*flush_mrs)(void);
};
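
/*
 * Illustration only, not part of the original header: a hedged sketch
 * (hypothetical name and return convention) of a caller interpreting
 * ->xmit() results as documented in the kerneldoc above the struct.
 */
static inline bool rds_example_keep_sending(struct rds_connection *conn,
					    struct rds_message *rm)
{
	int ret = conn->c_trans->xmit(conn, rm, 0, 0, 0);

	if (ret > 0)		/* ret bytes, including header bytes, sent */
		return true;
	if (ret == -EAGAIN)	/* transport asks us to retry immediately */
		return true;
	/* 0: transport will wake us when space frees up; -ENOMEM: back off */
	return false;
}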

struct rds_sock {
	struct sock		rs_sk;

	u64			rs_user_addr;
	u64			rs_user_bytes;

	/*
	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
	 * support.
	 */
	struct hlist_node	rs_bound_node;
	__be32			rs_bound_addr;
	__be32			rs_conn_addr;
	__be16			rs_bound_port;
	__be16			rs_conn_port;
	struct rds_transport    *rs_transport;

	/*
	 * rds_sendmsg caches the conn it used the last time around.
	 * This helps avoid costly lookups.
	 */
	struct rds_connection	*rs_conn;

	/* flag indicating we were congested or not */
	int			rs_congested;
	/* seen congestion (ENOBUFS) when sending? */
	int			rs_seen_congestion;

	/* rs_lock protects all these adjacent members before the newline */
	spinlock_t		rs_lock;
	struct list_head	rs_send_queue;
	u32			rs_snd_bytes;
	int			rs_rcv_bytes;
	struct list_head	rs_notify_queue;	/* currently used for failed RDMAs */

	/* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
	 * to decide whether the application should be woken up.
	 * If not set, we use rs_cong_track to find out whether a cong map
	 * update arrived.
	 */
	uint64_t		rs_cong_mask;
	uint64_t		rs_cong_notify;
	struct list_head	rs_cong_list;
	unsigned long		rs_cong_track;

	/*
	 * rs_recv_lock protects the receive queue, and is
	 * used to serialize with rds_release.
	 */
	rwlock_t		rs_recv_lock;
	struct list_head	rs_recv_queue;

	/* just for stats reporting */
	struct list_head	rs_item;

	/* these have their own lock */
	spinlock_t		rs_rdma_lock;
	struct rb_root		rs_rdma_keys;

	/* Socket options - in case there will be more */
	unsigned char		rs_recverr,
				rs_cong_monitor;
};

static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
{
	return container_of(sk, struct rds_sock, rs_sk);
}
static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
{
	return &rs->rs_sk;
}

/*
 * The stack assigns sk_sndbuf and sk_rcvbuf to twice the specified value
 * to account for overhead.  We don't account for overhead; we just apply
 * the number of payload bytes to the specified value.
 */
static inline int rds_sk_sndbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_sndbuf / 2;
}
static inline int rds_sk_rcvbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
}
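
/*
 * Worked example (illustration only, not part of the original header):
 * an application that sets SO_SNDBUF to 4096 gets sk_sndbuf == 8192 from
 * the stack, so rds_sk_sndbuf() reports the requested 4096 payload bytes.
 */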

struct rds_statistics {
	uint64_t	s_conn_reset;
	uint64_t	s_recv_drop_bad_checksum;
	uint64_t	s_recv_drop_old_seq;
	uint64_t	s_recv_drop_no_sock;
	uint64_t	s_recv_drop_dead_sock;
	uint64_t	s_recv_deliver_raced;
	uint64_t	s_recv_delivered;
	uint64_t	s_recv_queued;
	uint64_t	s_recv_immediate_retry;
	uint64_t	s_recv_delayed_retry;
	uint64_t	s_recv_ack_required;
	uint64_t	s_recv_rdma_bytes;
	uint64_t	s_recv_ping;
	uint64_t	s_send_queue_empty;
	uint64_t	s_send_queue_full;
	uint64_t	s_send_lock_contention;
	uint64_t	s_send_lock_queue_raced;
	uint64_t	s_send_immediate_retry;
	uint64_t	s_send_delayed_retry;
	uint64_t	s_send_drop_acked;
	uint64_t	s_send_ack_required;
	uint64_t	s_send_queued;
	uint64_t	s_send_rdma;
	uint64_t	s_send_rdma_bytes;
	uint64_t	s_send_pong;
	uint64_t	s_page_remainder_hit;
	uint64_t	s_page_remainder_miss;
	uint64_t	s_copy_to_user;
	uint64_t	s_copy_from_user;
	uint64_t	s_cong_update_queued;
	uint64_t	s_cong_update_received;
	uint64_t	s_cong_send_error;
	uint64_t	s_cong_send_blocked;
};

/* af_rds.c */
void rds_sock_addref(struct rds_sock *rs);
void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
	wait_queue_head_t *waitq = sk_sleep(sk);

	if (!sock_flag(sk, SOCK_DEAD) && waitq)
		wake_up(waitq);
}
extern wait_queue_head_t rds_poll_waitq;


/* bind.c */
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
void rds_remove_bound(struct rds_sock *rs);
struct rds_sock *rds_find_bound(__be32 addr, __be16 port);

/* cong.c */
int rds_cong_get_maps(struct rds_connection *conn);
void rds_cong_add_conn(struct rds_connection *conn);
void rds_cong_remove_conn(struct rds_connection *conn);
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs);
void rds_cong_queue_updates(struct rds_cong_map *map);
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
int rds_cong_updated_since(unsigned long *recent);
void rds_cong_add_socket(struct rds_sock *);
void rds_cong_remove_socket(struct rds_sock *);
void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);

/* conn.c */
int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp);
struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
			       struct rds_transport *trans, gfp_t gfp);
void rds_conn_shutdown(struct rds_connection *conn);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens,
			  int (*visitor)(struct rds_connection *, void *),
			  size_t item_len);
__printf(2, 3)
void __rds_conn_error(struct rds_connection *conn, const char *, ...);
#define rds_conn_error(conn, fmt...) \
	__rds_conn_error(conn, KERN_WARNING "RDS: " fmt)

static inline int
rds_conn_transition(struct rds_connection *conn, int old, int new)
{
	return atomic_cmpxchg(&conn->c_state, old, new) == old;
}

static inline int
rds_conn_state(struct rds_connection *conn)
{
	return atomic_read(&conn->c_state);
}

static inline int
rds_conn_up(struct rds_connection *conn)
{
	return atomic_read(&conn->c_state) == RDS_CONN_UP;
}

static inline int
rds_conn_connecting(struct rds_connection *conn)
{
	return atomic_read(&conn->c_state) == RDS_CONN_CONNECTING;
}

/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq);
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data, unsigned int len);
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_message_wait(struct rds_message *rm);
void rds_message_unmapped(struct rds_message *rm);

static inline void rds_message_make_checksum(struct rds_header *hdr)
{
	hdr->h_csum = 0;
	hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
}

static inline int rds_message_verify_checksum(const struct rds_header *hdr)
{
	return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
}
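
/*
 * Illustration only, not part of the original header: a hedged sketch
 * (hypothetical name) of the intended pairing; populate the header first
 * and checksum it last, since rds_message_make_checksum() sums the whole
 * header.  A receiver treats h_csum == 0 as "not checksummed", as
 * rds_message_verify_checksum() above shows.
 */
static inline void rds_example_finish_header(struct rds_header *hdr,
					     __be16 sport, __be16 dport,
					     u64 seq)
{
	rds_message_populate_header(hdr, sport, dport, seq);
	rds_message_make_checksum(hdr);	/* must run after all other fields */
}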


/* page.c */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp);
int rds_page_copy_user(struct page *page, unsigned long offset,
		       void __user *ptr, unsigned long bytes,
		       int to_user);
#define rds_page_copy_to_user(page, offset, ptr, bytes) \
	rds_page_copy_user(page, offset, ptr, bytes, 1)
#define rds_page_copy_from_user(page, offset, ptr, bytes) \
	rds_page_copy_user(page, offset, ptr, bytes, 0)
void rds_page_exit(void);

/* recv.c */
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  __be32 saddr);
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
		       struct rds_incoming *inc, gfp_t gfp);
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip);

/* send.c */
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
void rds_send_reset(struct rds_connection *conn);
int rds_send_xmit(struct rds_connection *conn);
struct sockaddr_in;
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked);
int rds_send_pong(struct rds_connection *conn, __be16 dport);
struct rds_message *rds_send_get_message(struct rds_connection *,
					 struct rm_rdma_op *);

/* rdma.c */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
int rds_rdma_extra_size(struct rds_rdma_args *args);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
void rds_atomic_free_op(struct rm_atomic_op *ao);
void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg);

void __rds_put_mr_final(struct rds_mr *mr);
static inline void rds_mr_put(struct rds_mr *mr)
{
	if (atomic_dec_and_test(&mr->r_refcount))
		__rds_put_mr_final(mr);
}

/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do {		\
	per_cpu(which, get_cpu()).member++;		\
	put_cpu();					\
} while (0)
#define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
#define rds_stats_add_which(which, member, count) do {		\
	per_cpu(which, get_cpu()).member += (count);	\
	put_cpu();					\
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
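
/*
 * Usage sketch (illustration only, not part of the original header):
 * the helpers above bump per-CPU counters; get_cpu()/put_cpu() pin the
 * CPU for the duration of the increment.  The counter pairing here is
 * chosen purely for illustration, and the function name is hypothetical.
 */
static inline void rds_example_count_rdma_recv(unsigned int bytes)
{
	rds_stats_inc(s_recv_delivered);
	rds_stats_add(s_recv_rdma_bytes, bytes);
}
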
int rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
			 uint64_t *values, const char *const *names,
			 size_t nr);

/* sysctl.c */
int rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
extern unsigned long rds_sysctl_sndbuf_max;
extern unsigned long rds_sysctl_reconnect_min_jiffies;
extern unsigned long rds_sysctl_reconnect_max_jiffies;
extern unsigned int  rds_sysctl_max_unacked_packets;
extern unsigned int  rds_sysctl_max_unacked_bytes;
extern unsigned int  rds_sysctl_ping_enable;
extern unsigned long rds_sysctl_trace_flags;
extern unsigned int  rds_sysctl_trace_level;

/* threads.c */
int rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
void rds_queue_reconnect(struct rds_connection *conn);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_complete(struct rds_connection *conn);

/* transport.c */
int rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(__be32 addr);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
				       unsigned int avail);
struct rds_transport *rds_trans_get(int t_type);
int rds_trans_init(void);
void rds_trans_exit(void);

#endif