#ifndef _RDS_RDS_H
#define _RDS_RDS_H

#include <net/sock.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <rdma/rdma_cm.h>
#include <linux/mutex.h>
#include <linux/rds.h>
#include <linux/rhashtable.h>

#include "info.h"

/*
 * RDS Network protocol version
 */
#define RDS_PROTOCOL_3_0	0x0300
#define RDS_PROTOCOL_3_1	0x0301
#define RDS_PROTOCOL_VERSION	RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
#define RDS_PROTOCOL_MINOR(v)	((v) & 255)
#define RDS_PROTOCOL(maj, min)	(((maj) << 8) | (min))

/*
 * XXX randomly chosen, but at least seems to be unused:
 * #		18464-18768 Unassigned
 * We should do better.  We want a reserved port to discourage unpriv'ed
 * userspace from listening.
 */
#define RDS_PORT	18634

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif

#ifdef DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__, ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
static inline __printf(1, 2)
void rdsdebug(char *fmt, ...)
{
}
#endif

/* XXX is there one of these somewhere? */
#define ceil(x, y) \
	({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })

#define RDS_FRAG_SHIFT	12
#define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))

#define RDS_CONG_MAP_BYTES	(65536 / 8)
#define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)

struct rds_cong_map {
	struct rb_node		m_rb_node;
	__be32			m_addr;
	wait_queue_head_t	m_waitq;
	struct list_head	m_conn_list;
	unsigned long		m_page_addrs[RDS_CONG_MAP_PAGES];
};

/*
 * This is how we will track the connection state:
 * A connection is always in one of the following
 * states. Updates to the state are atomic and imply
 * a memory barrier.
 */
enum {
	RDS_CONN_DOWN = 0,
	RDS_CONN_CONNECTING,
	RDS_CONN_DISCONNECTING,
	RDS_CONN_UP,
	RDS_CONN_RESETTING,
	RDS_CONN_ERROR,
};

/* Bits for c_flags */
#define RDS_LL_SEND_FULL	0
#define RDS_RECONNECT_PENDING	1
#define RDS_IN_XMIT		2
#define RDS_RECV_REFILL		3
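
/*
 * Illustrative sketch (not the exact rds_send_xmit() code): these are
 * bit numbers for the atomic bitops on cp_flags (see struct
 * rds_conn_path below), not bit masks.  A sender claims the transmit
 * path with something like
 *
 *	if (test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags))
 *		return -ENOMEM;
 *
 * and releases it again with clear_bit(RDS_IN_XMIT, &cp->cp_flags).
 */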
/* Max number of multipaths per RDS connection. Must be a power of 2 */
#define RDS_MPATH_WORKERS	1

/* Per mpath connection state */
struct rds_conn_path {
	struct rds_connection	*cp_conn;
	struct rds_message	*cp_xmit_rm;
	unsigned long		cp_xmit_sg;
	unsigned int		cp_xmit_hdr_off;
	unsigned int		cp_xmit_data_off;
	unsigned int		cp_xmit_atomic_sent;
	unsigned int		cp_xmit_rdma_sent;
	unsigned int		cp_xmit_data_sent;

	spinlock_t		cp_lock;	/* protect msg queues */
	u64			cp_next_tx_seq;
	struct list_head	cp_send_queue;
	struct list_head	cp_retrans;

	u64			cp_next_rx_seq;

	void			*cp_transport_data;

	atomic_t		cp_state;
	unsigned long		cp_send_gen;
	unsigned long		cp_flags;
	unsigned long		cp_reconnect_jiffies;
	struct delayed_work	cp_send_w;
	struct delayed_work	cp_recv_w;
	struct delayed_work	cp_conn_w;
	struct work_struct	cp_down_w;
	struct mutex		cp_cm_lock;	/* protect cp_state & cm */
	wait_queue_head_t	cp_waitq;

	unsigned int		cp_unacked_packets;
	unsigned int		cp_unacked_bytes;
	unsigned int		cp_outgoing:1,
				cp_pad_to_32:31;
	unsigned int		cp_index;
};

/* One rds_connection per RDS address pair */
struct rds_connection {
	struct hlist_node	c_hash_node;
	__be32			c_laddr;
	__be32			c_faddr;
	unsigned int		c_loopback:1,
				c_pad_to_32:31;
	int			c_npaths;
	struct rds_connection	*c_passive;
	struct rds_transport	*c_trans;

	struct rds_cong_map	*c_lcong;
	struct rds_cong_map	*c_fcong;

	/* Protocol version */
	unsigned int		c_version;
	possible_net_t		c_net;

	struct list_head	c_map_item;
	unsigned long		c_map_queued;

	struct rds_conn_path	c_path[RDS_MPATH_WORKERS];
};

static inline
struct net *rds_conn_net(struct rds_connection *conn)
{
	return read_pnet(&conn->c_net);
}

static inline
void rds_conn_net_set(struct rds_connection *conn, struct net *net)
{
	write_pnet(&conn->c_net, net);
}

#define RDS_FLAG_CONG_BITMAP	0x01
#define RDS_FLAG_ACK_REQUIRED	0x02
#define RDS_FLAG_RETRANSMITTED	0x04
#define RDS_MAX_ADV_CREDIT	255

/*
 * Maximum space available for extension headers.
 */
#define RDS_HEADER_EXT_SPACE	16

struct rds_header {
	__be64	h_sequence;
	__be64	h_ack;
	__be32	h_len;
	__be16	h_sport;
	__be16	h_dport;
	u8	h_flags;
	u8	h_credit;
	u8	h_padding[4];
	__sum16	h_csum;

	u8	h_exthdr[RDS_HEADER_EXT_SPACE];
};

/*
 * Reserved - indicates end of extensions
 */
#define RDS_EXTHDR_NONE		0

/*
 * This extension header is included in the very
 * first message that is sent on a new connection,
 * and identifies the protocol level. This will help
 * rolling updates if a future change requires breaking
 * the protocol.
 * NB: This is no longer true for IB, where we do a version
 * negotiation during the connection setup phase (protocol
 * version information is included in the RDMA CM private data).
 */
#define RDS_EXTHDR_VERSION	1
struct rds_ext_header_version {
	__be32			h_version;
};

/*
 * This extension header is included in the RDS message
 * chasing an RDMA operation.
 */
#define RDS_EXTHDR_RDMA		2
struct rds_ext_header_rdma {
	__be32			h_rdma_rkey;
};
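
/*
 * Illustrative sketch: extension headers are packed into h_exthdr as a
 * type byte followed by that type's fixed-size payload, terminated by
 * RDS_EXTHDR_NONE.  Adding the version extension with
 * rds_message_add_extension() (declared later in this header) looks
 * roughly like
 *
 *	struct rds_ext_header_version ext;
 *
 *	ext.h_version = cpu_to_be32(RDS_PROTOCOL_VERSION);
 *	rds_message_add_extension(hdr, RDS_EXTHDR_VERSION,
 *				  &ext, sizeof(ext));
 */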
/*
 * This extension header tells the peer about the
 * destination <R_Key,offset> of the requested RDMA
 * operation.
 */
#define RDS_EXTHDR_RDMA_DEST	3
struct rds_ext_header_rdma_dest {
	__be32			h_rdma_rkey;
	__be32			h_rdma_offset;
};

#define __RDS_EXTHDR_MAX	16 /* for now */

struct rds_incoming {
	atomic_t		i_refcount;
	struct list_head	i_item;
	struct rds_connection	*i_conn;
	struct rds_conn_path	*i_conn_path;
	struct rds_header	i_hdr;
	unsigned long		i_rx_jiffies;
	__be32			i_saddr;

	rds_rdma_cookie_t	i_rdma_cookie;
	struct timeval		i_rx_tstamp;
};

struct rds_mr {
	struct rb_node		r_rb_node;
	atomic_t		r_refcount;
	u32			r_key;

	/* A copy of the creation flags */
	unsigned int		r_use_once:1;
	unsigned int		r_invalidate:1;
	unsigned int		r_write:1;

	/* This is for RDS_MR_DEAD.
	 * It would be nice & consistent to make this part of the above
	 * bit field here, but we need to use test_and_set_bit.
	 */
	unsigned long		r_state;
	struct rds_sock		*r_sock; /* back pointer to the socket that owns us */
	struct rds_transport	*r_trans;
	void			*r_trans_private;
};

/* Flags for mr->r_state */
#define RDS_MR_DEAD		0

static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
{
	return r_key | (((u64) offset) << 32);
}

static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
{
	return cookie;
}

static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
{
	return cookie >> 32;
}
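
/*
 * Illustrative example: the cookie packs <R_Key, offset> into a single
 * u64, key in the low 32 bits and byte offset in the high 32 bits, so
 * the two accessors undo rds_rdma_make_cookie():
 *
 *	rds_rdma_cookie_t c = rds_rdma_make_cookie(0x1234, 8);
 *
 * then rds_rdma_cookie_key(c) == 0x1234 and
 * rds_rdma_cookie_offset(c) == 8.
 */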
/* atomic operation types */
#define RDS_ATOMIC_TYPE_CSWP		0
#define RDS_ATOMIC_TYPE_FADD		1

/*
 * m_sock_item and m_conn_item are on lists that are serialized under
 * conn->c_lock.  m_sock_item has additional meaning in that once it is empty
 * the message will not be put back on the retransmit list after being sent.
 * Messages that are canceled while being sent rely on this.
 *
 * m_inc is used by loopback so that it can pass an incoming message straight
 * back up into the rx path.  It embeds a wire header which is also used by
 * the send path, which is kind of awkward.
 *
 * m_sock_item indicates the message's presence on a socket's send or receive
 * queue.  m_rs will point to that socket.
 *
 * m_daddr is used by cancellation to prune messages to a given destination.
 *
 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
 * nesting.  As paths iterate over messages on a sock, or conn, they must
 * also lock the conn, or sock, to remove the message from those lists too.
 * Testing the flag to determine if the message is still on the lists lets
 * us avoid testing the list_head directly.  That means each path can use
 * the message's list_head to keep it on a local list while juggling locks
 * without confusing the other path.
 *
 * m_ack_seq is an optional field set by transports that need a different
 * sequence number range to invalidate.  They can use this in a callback
 * that they pass to rds_send_drop_acked() to see if each message has been
 * acked.  The HAS_ACK_SEQ flag can be used to detect messages which haven't
 * had ack_seq set yet.
 */
#define RDS_MSG_ON_SOCK		1
#define RDS_MSG_ON_CONN		2
#define RDS_MSG_HAS_ACK_SEQ	3
#define RDS_MSG_ACK_REQUIRED	4
#define RDS_MSG_RETRANSMITTED	5
#define RDS_MSG_MAPPED		6
#define RDS_MSG_PAGEVEC		7

struct rds_message {
	atomic_t		m_refcount;
	struct list_head	m_sock_item;
	struct list_head	m_conn_item;
	struct rds_incoming	m_inc;
	u64			m_ack_seq;
	__be32			m_daddr;
	unsigned long		m_flags;

	/* Never access m_rs without holding m_rs_lock.
	 * Lock nesting is
	 *  rm->m_rs_lock
	 *   -> rs->rs_lock
	 */
	spinlock_t		m_rs_lock;
	wait_queue_head_t	m_flush_wait;

	struct rds_sock		*m_rs;

	/* cookie to send to remote, in rds header */
	rds_rdma_cookie_t	m_rdma_cookie;

	unsigned int		m_used_sgs;
	unsigned int		m_total_sgs;

	void			*m_final_op;

	struct {
		struct rm_atomic_op {
			int			op_type;
			union {
				struct {
					uint64_t	compare;
					uint64_t	swap;
					uint64_t	compare_mask;
					uint64_t	swap_mask;
				} op_m_cswp;
				struct {
					uint64_t	add;
					uint64_t	nocarry_mask;
				} op_m_fadd;
			};

			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} atomic;
		struct rm_rdma_op {
			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_write:1;
			unsigned int		op_fence:1;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			unsigned int		op_bytes;
			unsigned int		op_nents;
			unsigned int		op_count;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} rdma;
		struct rm_data_op {
			unsigned int		op_active:1;
			unsigned int		op_nents;
			unsigned int		op_count;
			unsigned int		op_dmasg;
			unsigned int		op_dmaoff;
			struct scatterlist	*op_sg;
		} data;
	};
};

/*
 * The RDS notifier is used (optionally) to tell the application about
 * completed RDMA operations.  Rather than keeping the whole rds message
 * around on the queue, we allocate a small notifier that is put on the
 * socket's notifier_list.  Notifications are delivered to the application
 * through control messages.
 */
struct rds_notifier {
	struct list_head	n_list;
	uint64_t		n_user_token;
	int			n_status;
};
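
/*
 * Illustrative note (based on the uapi in <linux/rds.h>, not on this
 * header alone): each queued notifier reaches the application as an
 * RDS_CMSG_RDMA_STATUS control message on recvmsg(), carrying the
 * n_user_token/n_status pair as a struct rds_rdma_notify.
 */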
/**
 * struct rds_transport - transport specific behavioural hooks
 *
 * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
 *        part of a message.  The caller serializes on the send_sem so this
 *        doesn't need to be reentrant for a given conn.  The header must be
 *        sent before the data payload.  .xmit must be prepared to send a
 *        message with no data payload.  .xmit should return the number of
 *        bytes that were sent down the connection, including header bytes.
 *        Returning 0 tells the caller that it doesn't need to perform any
 *        additional work now.  This is usually the case when the transport has
 *        filled the sending queue for its connection and will handle
 *        triggering the rds thread to continue the send when space becomes
 *        available.  Returning -EAGAIN tells the caller to retry the send
 *        immediately.  Returning -ENOMEM tells the caller to retry the send at
 *        some point in the future.
 *
 * @conn_shutdown: conn_shutdown stops traffic on the given connection.  Once
 *                 it returns the connection can not call rds_recv_incoming().
 *                 This will only be called once after conn_connect returns
 *                 non-zero success.  The caller serializes this with the send
 *                 and connecting paths (xmit_* and conn_*).  The transport is
 *                 responsible for other serialization, including
 *                 rds_recv_incoming().  This is called in process context but
 *                 should try hard not to block.
 */

struct rds_transport {
	char			t_name[TRANSNAMSIZ];
	struct list_head	t_item;
	struct module		*t_owner;
	unsigned int		t_prefer_loopback:1,
				t_mp_capable:1;
	unsigned int		t_type;

	int (*laddr_check)(struct net *net, __be32 addr);
	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
	void (*conn_free)(void *data);
	int (*conn_connect)(struct rds_connection *conn);
	void (*conn_shutdown)(struct rds_connection *conn);
	void (*xmit_prepare)(struct rds_connection *conn);
	void (*xmit_path_prepare)(struct rds_conn_path *cp);
	void (*xmit_complete)(struct rds_connection *conn);
	void (*xmit_path_complete)(struct rds_conn_path *cp);
	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
		    unsigned int hdr_off, unsigned int sg, unsigned int off);
	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
	int (*recv)(struct rds_connection *conn);
	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
	void (*inc_free)(struct rds_incoming *inc);

	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event);
	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id);
	void (*cm_connect_complete)(struct rds_connection *conn,
				    struct rdma_cm_event *event);

	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
					unsigned int avail);
	void (*exit)(void);
	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
			struct rds_sock *rs, u32 *key_ret);
	void (*sync_mr)(void *trans_private, int direction);
	void (*free_mr)(void *trans_private, int invalidate);
	void (*flush_mrs)(void);
};
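
/*
 * Illustrative sketch (hypothetical transport, names invented): a
 * transport fills in this ops table and registers it with
 * rds_trans_register(), declared near the end of this header:
 *
 *	static struct rds_transport my_trans = {
 *		.t_name		= "mytrans",
 *		.t_owner	= THIS_MODULE,
 *		.laddr_check	= my_laddr_check,
 *		.conn_alloc	= my_conn_alloc,
 *		...
 *	};
 *
 *	rds_trans_register(&my_trans);
 */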
struct rds_sock {
	struct sock		rs_sk;

	u64			rs_user_addr;
	u64			rs_user_bytes;

	/*
	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
	 * support.
	 */
	struct rhash_head	rs_bound_node;
	u64			rs_bound_key;
	__be32			rs_bound_addr;
	__be32			rs_conn_addr;
	__be16			rs_bound_port;
	__be16			rs_conn_port;
	struct rds_transport	*rs_transport;

	/*
	 * rds_sendmsg caches the conn it used the last time around.
	 * This helps avoid costly lookups.
	 */
	struct rds_connection	*rs_conn;

	/* flag indicating we were congested or not */
	int			rs_congested;
	/* seen congestion (ENOBUFS) when sending? */
	int			rs_seen_congestion;

	/* rs_lock protects all these adjacent members before the newline */
	spinlock_t		rs_lock;
	struct list_head	rs_send_queue;
	u32			rs_snd_bytes;
	int			rs_rcv_bytes;
	struct list_head	rs_notify_queue; /* currently used for failed RDMAs */

	/* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
	 * to decide whether the application should be woken up.
	 * If not set, we use rs_cong_track to find out whether a cong map
	 * update arrived.
	 */
	uint64_t		rs_cong_mask;
	uint64_t		rs_cong_notify;
	struct list_head	rs_cong_list;
	unsigned long		rs_cong_track;

	/*
	 * rs_recv_lock protects the receive queue, and is
	 * used to serialize with rds_release.
	 */
	rwlock_t		rs_recv_lock;
	struct list_head	rs_recv_queue;

	/* just for stats reporting */
	struct list_head	rs_item;

	/* these have their own lock */
	spinlock_t		rs_rdma_lock;
	struct rb_root		rs_rdma_keys;

	/* Socket options - in case there will be more */
	unsigned char		rs_recverr,
				rs_cong_monitor;
};

static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
{
	return container_of(sk, struct rds_sock, rs_sk);
}
static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
{
	return &rs->rs_sk;
}

/*
 * The stack assigns sk_sndbuf and sk_rcvbuf to twice the specified value
 * to account for overhead.  We don't account for overhead, we just apply
 * the number of payload bytes to the specified value.
 */
static inline int rds_sk_sndbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_sndbuf / 2;
}
static inline int rds_sk_rcvbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
}
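
/*
 * Illustrative example: if an application sets SO_SNDBUF to 64KB, the
 * core stack stores 128KB in sk_sndbuf to cover bookkeeping overhead;
 * rds_sk_sndbuf() halves that back so RDS limits itself to the 64KB of
 * payload the application actually asked for.
 */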
struct rds_statistics {
	uint64_t	s_conn_reset;
	uint64_t	s_recv_drop_bad_checksum;
	uint64_t	s_recv_drop_old_seq;
	uint64_t	s_recv_drop_no_sock;
	uint64_t	s_recv_drop_dead_sock;
	uint64_t	s_recv_deliver_raced;
	uint64_t	s_recv_delivered;
	uint64_t	s_recv_queued;
	uint64_t	s_recv_immediate_retry;
	uint64_t	s_recv_delayed_retry;
	uint64_t	s_recv_ack_required;
	uint64_t	s_recv_rdma_bytes;
	uint64_t	s_recv_ping;
	uint64_t	s_send_queue_empty;
	uint64_t	s_send_queue_full;
	uint64_t	s_send_lock_contention;
	uint64_t	s_send_lock_queue_raced;
	uint64_t	s_send_immediate_retry;
	uint64_t	s_send_delayed_retry;
	uint64_t	s_send_drop_acked;
	uint64_t	s_send_ack_required;
	uint64_t	s_send_queued;
	uint64_t	s_send_rdma;
	uint64_t	s_send_rdma_bytes;
	uint64_t	s_send_pong;
	uint64_t	s_page_remainder_hit;
	uint64_t	s_page_remainder_miss;
	uint64_t	s_copy_to_user;
	uint64_t	s_copy_from_user;
	uint64_t	s_cong_update_queued;
	uint64_t	s_cong_update_received;
	uint64_t	s_cong_send_error;
	uint64_t	s_cong_send_blocked;
};

/* af_rds.c */
void rds_sock_addref(struct rds_sock *rs);
void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
	wait_queue_head_t *waitq = sk_sleep(sk);

	if (!sock_flag(sk, SOCK_DEAD) && waitq)
		wake_up(waitq);
}
extern wait_queue_head_t rds_poll_waitq;

/* bind.c */
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
void rds_remove_bound(struct rds_sock *rs);
struct rds_sock *rds_find_bound(__be32 addr, __be16 port);
int rds_bind_lock_init(void);
void rds_bind_lock_destroy(void);

/* cong.c */
int rds_cong_get_maps(struct rds_connection *conn);
void rds_cong_add_conn(struct rds_connection *conn);
void rds_cong_remove_conn(struct rds_connection *conn);
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock,
		  struct rds_sock *rs);
void rds_cong_queue_updates(struct rds_cong_map *map);
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
int rds_cong_updated_since(unsigned long *recent);
void rds_cong_add_socket(struct rds_sock *);
void rds_cong_remove_socket(struct rds_sock *);
void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);

/* conn.c */
int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(struct net *net,
				       __be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
						__be32 laddr, __be32 faddr,
						struct rds_transport *trans,
						gfp_t gfp);
void rds_conn_shutdown(struct rds_connection *conn);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
void rds_conn_path_drop(struct rds_conn_path *cpath);
void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    size_t item_len);

__printf(2, 3)
void __rds_conn_error(struct rds_connection *conn, const char *, ...);
#define rds_conn_error(conn, fmt...) \
	__rds_conn_error(conn, KERN_WARNING "RDS: " fmt)

__printf(2, 3)
void __rds_conn_path_error(struct rds_conn_path *cp, const char *, ...);
#define rds_conn_path_error(cp, fmt...) \
	__rds_conn_path_error(cp, KERN_WARNING "RDS: " fmt)

static inline int
rds_conn_path_transition(struct rds_conn_path *cp, int old, int new)
{
	return atomic_cmpxchg(&cp->cp_state, old, new) == old;
}

static inline int
rds_conn_transition(struct rds_connection *conn, int old, int new)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_transition(&conn->c_path[0], old, new);
}

static inline int
rds_conn_path_state(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state);
}

static inline int
rds_conn_state(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_state(&conn->c_path[0]);
}

static inline int
rds_conn_path_up(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_UP;
}

static inline int
rds_conn_up(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_up(&conn->c_path[0]);
}

static inline int
rds_conn_path_connecting(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING;
}

static inline int
rds_conn_connecting(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_connecting(&conn->c_path[0]);
}
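
/*
 * Illustrative example: because the transition helpers are built on
 * atomic_cmpxchg(), only one caller can win a given transition.  In
 *
 *	if (rds_conn_path_transition(cp, RDS_CONN_DOWN,
 *				     RDS_CONN_CONNECTING))
 *
 * the winner sees a nonzero return and owns the connect attempt; any
 * concurrent caller racing on the same path sees zero.
 */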
/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq);
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data, unsigned int len);
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_message_wait(struct rds_message *rm);
void rds_message_unmapped(struct rds_message *rm);

static inline void rds_message_make_checksum(struct rds_header *hdr)
{
	hdr->h_csum = 0;
	hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
}

static inline int rds_message_verify_checksum(const struct rds_header *hdr)
{
	return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
}
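
/*
 * Illustrative note: the checksum is an IP-style ones' complement sum
 * over the entire header (sizeof(*hdr) >> 2 32-bit words), computed
 * with h_csum zeroed first.  Verification accepts either a zero
 * checksum (never filled in) or a stored sum that makes the whole
 * header fold to zero.
 */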
/* page.c */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp);
int rds_page_copy_user(struct page *page, unsigned long offset,
		       void __user *ptr, unsigned long bytes,
		       int to_user);
#define rds_page_copy_to_user(page, offset, ptr, bytes) \
	rds_page_copy_user(page, offset, ptr, bytes, 1)
#define rds_page_copy_from_user(page, offset, ptr, bytes) \
	rds_page_copy_user(page, offset, ptr, bytes, 0)
void rds_page_exit(void);

/* recv.c */
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  __be32 saddr);
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *conn,
		       __be32 saddr);
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
		       struct rds_incoming *inc, gfp_t gfp);
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip);

/* send.c */
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
void rds_send_reset(struct rds_connection *conn);
int rds_send_xmit(struct rds_conn_path *cp);
struct sockaddr_in;
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked);
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked);
int rds_send_pong(struct rds_conn_path *cp, __be16 dport);

/* rdma.c */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
int rds_rdma_extra_size(struct rds_rdma_args *args);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
void rds_atomic_free_op(struct rm_atomic_op *ao);
void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg);

void __rds_put_mr_final(struct rds_mr *mr);
static inline void rds_mr_put(struct rds_mr *mr)
{
	if (atomic_dec_and_test(&mr->r_refcount))
		__rds_put_mr_final(mr);
}

/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do {		\
	per_cpu(which, get_cpu()).member++;		\
	put_cpu();					\
} while (0)
#define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
#define rds_stats_add_which(which, member, count) do {	\
	per_cpu(which, get_cpu()).member += (count);	\
	put_cpu();					\
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
int rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
			 uint64_t *values, const char *const *names,
			 size_t nr);

/* sysctl.c */
int rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
extern unsigned long rds_sysctl_sndbuf_max;
extern unsigned long rds_sysctl_reconnect_min_jiffies;
extern unsigned long rds_sysctl_reconnect_max_jiffies;
extern unsigned int rds_sysctl_max_unacked_packets;
extern unsigned int rds_sysctl_max_unacked_bytes;
extern unsigned int rds_sysctl_ping_enable;
extern unsigned long rds_sysctl_trace_flags;
extern unsigned int rds_sysctl_trace_level;

/* threads.c */
int rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
void rds_queue_reconnect(struct rds_conn_path *cp);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_path_complete(struct rds_conn_path *conn, int curr);
void rds_connect_complete(struct rds_connection *conn);

/* transport.c */
int rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
				       unsigned int avail);
struct rds_transport *rds_trans_get(int t_type);
int rds_trans_init(void);
void rds_trans_exit(void);

#endif