/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Definitions for the SMC module (socket related)
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
#ifndef __SMC_H
#define __SMC_H

#include <linux/socket.h>
#include <linux/types.h>
#include <linux/compiler.h> /* __aligned */
#include <net/genetlink.h>
#include <net/sock.h>

#include "smc_ib.h"

#define SMC_V1		1		/* SMC version V1 */
#define SMC_V2		2		/* SMC version V2 */

#define SMC_RELEASE_0	0
#define SMC_RELEASE_1	1
#define SMC_RELEASE	SMC_RELEASE_1	/* the latest release version */

#define SMCPROTO_SMC	0	/* SMC protocol, IPv4 */
#define SMCPROTO_SMC6	1	/* SMC protocol, IPv6 */

#define SMC_MAX_ISM_DEVS	8	/* max # of proposed non-native ISM
					 * devices
					 */
#define SMC_AUTOCORKING_DEFAULT_SIZE	0x10000	/* 64K by default */

extern struct proto smc_proto;
extern struct proto smc_proto6;

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif

enum smc_state {		/* possible states of an SMC socket */
	SMC_ACTIVE	= 1,
	SMC_INIT	= 2,
	SMC_CLOSED	= 7,
	SMC_LISTEN	= 10,
	/* normal close */
	SMC_PEERCLOSEWAIT1	= 20,
	SMC_PEERCLOSEWAIT2	= 21,
	SMC_APPFINCLOSEWAIT	= 24,
	SMC_APPCLOSEWAIT1	= 22,
	SMC_APPCLOSEWAIT2	= 23,
	SMC_PEERFINCLOSEWAIT	= 25,
	/* abnormal close */
	SMC_PEERABORTWAIT	= 26,
	SMC_PROCESSABORT	= 27,
};

struct smc_link_group;

struct smc_wr_rx_hdr {	/* common prefix part of LLC and CDC to demultiplex */
	union {
		u8 type;
#if defined(__BIG_ENDIAN_BITFIELD)
		struct {
			u8 llc_version:4,
			   llc_type:4;
		};
#elif defined(__LITTLE_ENDIAN_BITFIELD)
		struct {
			u8 llc_type:4,
			   llc_version:4;
		};
#endif
	};
} __aligned(1);

struct smc_cdc_conn_state_flags {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8	peer_done_writing : 1;	/* Sending done indicator */
	u8	peer_conn_closed : 1;	/* Peer connection closed indicator */
	u8	peer_conn_abort : 1;	/* Abnormal close indicator */
	u8	reserved : 5;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8	reserved : 5;
	u8	peer_conn_abort : 1;
	u8	peer_conn_closed : 1;
	u8	peer_done_writing : 1;
#endif
};

struct smc_cdc_producer_flags {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8	write_blocked : 1;	/* Writing Blocked, no rx buf space */
	u8	urg_data_pending : 1;	/* Urgent Data Pending */
	u8	urg_data_present : 1;	/* Urgent Data Present */
	u8	cons_curs_upd_req : 1;	/* cursor update requested */
	u8	failover_validation : 1;/* message replay due to failover */
	u8	reserved : 3;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8	reserved : 3;
	u8	failover_validation : 1;
	u8	cons_curs_upd_req : 1;
	u8	urg_data_present : 1;
	u8	urg_data_pending : 1;
	u8	write_blocked : 1;
#endif
};

/* in host byte order */
union smc_host_cursor {	/* SMC cursor - an offset in an RMBE */
	struct {
		u16	reserved;
		u16	wrap;		/* window wrap sequence number */
		u32	count;		/* cursor (= offset) part */
	};
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t	acurs;		/* for atomic processing */
#else
	u64		acurs;		/* for atomic processing */
#endif
} __aligned(8);

/* in host byte order, except for flag bitfields in network byte order */
struct smc_host_cdc_msg {		/* Connection Data Control message */
	struct smc_wr_rx_hdr		common;	/* .type = 0xFE */
	u8				len;	/* length = 44 */
	u16				seqno;	/* connection seq # */
	u32				token;	/* alert_token */
	union smc_host_cursor		prod;	/* producer cursor */
	union smc_host_cursor		cons;	/* consumer cursor,
						 * piggy backed "ack"
						 */
	struct smc_cdc_producer_flags	prod_flags;	/* conn. tx/rx status */
	struct smc_cdc_conn_state_flags	conn_state_flags; /* peer conn. status*/
	u8				reserved[18];
} __aligned(8);
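/*
 * Illustrative sketch only (hypothetical helper, not part of the in-tree
 * API): a cursor is copied as one 64-bit unit via the acurs overlay so
 * that wrap and count always stay consistent with each other. With
 * atomic64 support the copy is lock-free; without it, a caller-supplied
 * lock (cf. acurs_lock in struct smc_connection below) must serialize it.
 */
static inline void smc_curs_copy_sketch(union smc_host_cursor *tgt,
					union smc_host_cursor *src,
					spinlock_t *lock)
{
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
#else
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	tgt->acurs = src->acurs;
	spin_unlock_irqrestore(lock, flags);
#endif
}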
enum smc_urg_state {
	SMC_URG_VALID	= 1,		/* data present */
	SMC_URG_NOTYET	= 2,		/* data pending */
	SMC_URG_READ	= 3,		/* data was already read */
};

struct smc_mark_woken {
	bool woken;
	void *key;
	wait_queue_entry_t wait_entry;
};

struct smc_connection {
	struct rb_node		alert_node;
	struct smc_link_group	*lgr;		/* link group of connection */
	struct smc_link		*lnk;		/* assigned SMC-R link */
	u32			alert_token_local; /* unique conn. id */
	u8			peer_rmbe_idx;	/* from tcp handshake */
	int			peer_rmbe_size;	/* size of peer rx buffer */
	atomic_t		peer_rmbe_space;/* remaining free bytes in peer
						 * rmbe
						 */
	int			rtoken_idx;	/* idx to peer RMB rkey/addr */

	struct smc_buf_desc	*sndbuf_desc;	/* send buffer descriptor */
	struct smc_buf_desc	*rmb_desc;	/* RMBE descriptor */
	int			rmbe_size_comp;	/* compressed notation */
	int			rmbe_update_limit;
						/* lower limit for consumer
						 * cursor update
						 */

	struct smc_host_cdc_msg	local_tx_ctrl;	/* host byte order staging
						 * buffer for CDC msg send
						 * .prod cf. TCP snd_nxt
						 * .cons cf. TCP sends ack
						 */
	union smc_host_cursor	local_tx_ctrl_fin;
						/* prod crsr - confirmed by peer
						 */
	union smc_host_cursor	tx_curs_prep;	/* tx - prepared data
						 * snd_max..wmem_alloc
						 */
	union smc_host_cursor	tx_curs_sent;	/* tx - sent data
						 * snd_nxt ?
						 */
	union smc_host_cursor	tx_curs_fin;	/* tx - confirmed by peer
						 * snd-wnd-begin ?
						 */
	atomic_t		sndbuf_space;	/* remaining space in sndbuf */
	u16			tx_cdc_seq;	/* sequence # for CDC send */
	u16			tx_cdc_seq_fin;	/* sequence # - tx completed */
	spinlock_t		send_lock;	/* protect wr_sends */
	atomic_t		cdc_pend_tx_wr;	/* number of pending tx CDC wqe
						 * - inc when post wqe,
						 * - dec on polled tx cqe
						 */
	wait_queue_head_t	cdc_pend_tx_wq;	/* wakeup on no cdc_pend_tx_wr*/
	atomic_t		tx_pushing;	/* nr_threads trying tx push */
	struct delayed_work	tx_work;	/* retry of smc_cdc_msg_send */
	u32			tx_off;		/* base offset in peer rmb */

	struct smc_host_cdc_msg	local_rx_ctrl;	/* filled during event_handl.
						 * .prod cf. TCP rcv_nxt
						 * .cons cf. TCP snd_una
						 */
	union smc_host_cursor	rx_curs_confirmed; /* confirmed to peer
						    * source of snd_una ?
						    */
	union smc_host_cursor	urg_curs;	/* points at urgent byte */
	enum smc_urg_state	urg_state;
	bool			urg_tx_pend;	/* urgent data staged */
	bool			urg_rx_skip_pend;
						/* indicate urgent oob data
						 * read, but previous regular
						 * data still pending
						 */
	char			urg_rx_byte;	/* urgent byte */
	bool			tx_in_release_sock;
						/* flush pending tx data in
						 * sock release_cb()
						 */
	atomic_t		bytes_to_rcv;	/* arrived data,
						 * not yet received
						 */
	atomic_t		splice_pending;	/* number of spliced bytes
						 * pending processing
						 */
#ifndef KERNEL_HAS_ATOMIC64
	spinlock_t		acurs_lock;	/* protect cursors */
#endif
	struct work_struct	close_work;	/* peer sent some closing */
	struct work_struct	abort_work;	/* abort the connection */
	struct tasklet_struct	rx_tsklet;	/* Receiver tasklet for SMC-D */
	u8			rx_off;		/* receive offset:
						 * 0 for SMC-R, 32 for SMC-D
						 */
	u64			peer_token;	/* SMC-D token of peer */
	u8			killed : 1;	/* abnormal termination */
	u8			freed : 1;	/* normal termination */
	u8			out_of_sync : 1; /* out of sync with peer */
};
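/*
 * Illustrative sketch only (hypothetical helper, not declared elsewhere):
 * the tx path can treat a connection as writable while both the local
 * send buffer and the peer's RMBE have room. Both counters above are
 * maintained with atomics, so this check needs no additional locking.
 */
static inline bool smc_conn_may_send_sketch(struct smc_connection *conn)
{
	return atomic_read(&conn->sndbuf_space) > 0 &&
	       atomic_read(&conn->peer_rmbe_space) > 0;
}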
struct smc_sock {				/* smc sock container */
	struct sock		sk;
	struct socket		*clcsock;	/* internal tcp socket */
	void			(*clcsk_state_change)(struct sock *sk);
						/* original state_change fct. */
	void			(*clcsk_data_ready)(struct sock *sk);
						/* original data_ready fct. */
	void			(*clcsk_write_space)(struct sock *sk);
						/* original write_space fct. */
	void			(*clcsk_error_report)(struct sock *sk);
						/* original error_report fct. */
	struct smc_connection	conn;		/* smc connection */
	struct smc_sock		*listen_smc;	/* listen parent */
	struct work_struct	connect_work;	/* handle non-blocking connect*/
	struct work_struct	tcp_listen_work;/* handle tcp socket accepts */
	struct work_struct	smc_listen_work;/* prepare new accept socket */
	struct list_head	accept_q;	/* sockets to be accepted */
	spinlock_t		accept_q_lock;	/* protects accept_q */
	bool			limit_smc_hs;	/* put constraint on handshake */
	bool			use_fallback;	/* fallback to tcp */
	int			fallback_rsn;	/* reason for fallback */
	u32			peer_diagnosis;	/* decline reason from peer */
	atomic_t		queued_smc_hs;	/* queued smc handshakes */
	struct inet_connection_sock_af_ops af_ops;
	const struct inet_connection_sock_af_ops *ori_af_ops;
						/* original af ops */
	int			sockopt_defer_accept;
						/* sockopt TCP_DEFER_ACCEPT
						 * value
						 */
	u8			wait_close_tx_prepared : 1;
						/* shutdown wr or close
						 * started, waiting for unsent
						 * data to be sent
						 */
	u8			connect_nonblock : 1;
						/* non-blocking connect in
						 * flight
						 */
	struct mutex		clcsock_release_lock;
						/* protects clcsock of a listen
						 * socket
						 */
};

#define smc_sk(ptr) container_of_const(ptr, struct smc_sock, sk)

static inline void smc_init_saved_callbacks(struct smc_sock *smc)
{
	smc->clcsk_state_change	= NULL;
	smc->clcsk_data_ready	= NULL;
	smc->clcsk_write_space	= NULL;
	smc->clcsk_error_report	= NULL;
}

static inline struct smc_sock *smc_clcsock_user_data(const struct sock *clcsk)
{
	return (struct smc_sock *)
	       ((uintptr_t)clcsk->sk_user_data & ~SK_USER_DATA_NOCOPY);
}

/* save target_cb in saved_cb, and replace target_cb with new_cb */
static inline void smc_clcsock_replace_cb(void (**target_cb)(struct sock *),
					  void (*new_cb)(struct sock *),
					  void (**saved_cb)(struct sock *))
{
	/* only save once */
	if (!*saved_cb)
		*saved_cb = *target_cb;
	*target_cb = new_cb;
}

/* restore target_cb to saved_cb, and reset saved_cb to NULL */
static inline void smc_clcsock_restore_cb(void (**target_cb)(struct sock *),
					  void (**saved_cb)(struct sock *))
{
	if (!*saved_cb)
		return;
	*target_cb = *saved_cb;
	*saved_cb = NULL;
}
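/*
 * Illustrative sketch only: how a clcsock callback is typically swapped
 * while the original is preserved for a later restore. The function name
 * and replacement handler are hypothetical; the real handlers live in
 * af_smc.c. sk_callback_lock is taken so the save/replace cannot race
 * with concurrent callback invocations.
 */
static inline void smc_hook_data_ready_sketch(struct smc_sock *smc,
					      void (*new_cb)(struct sock *))
{
	struct sock *clcsk = smc->clcsock->sk;

	write_lock_bh(&clcsk->sk_callback_lock);
	smc_clcsock_replace_cb(&clcsk->sk_data_ready, new_cb,
			       &smc->clcsk_data_ready);
	write_unlock_bh(&clcsk->sk_callback_lock);
}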
extern struct workqueue_struct	*smc_hs_wq;	/* wq for handshake work */
extern struct workqueue_struct	*smc_close_wq;	/* wq for close work */

#define SMC_SYSTEMID_LEN	8

extern u8 local_systemid[SMC_SYSTEMID_LEN];	/* unique system identifier */

#define ntohll(x) be64_to_cpu(x)
#define htonll(x) cpu_to_be64(x)

/* convert a u32 value into network byte order, store it into a 3 byte field */
static inline void hton24(u8 *net, u32 host)
{
	__be32 t;

	t = cpu_to_be32(host);
	memcpy(net, ((u8 *)&t) + 1, 3);
}

/* convert a received 3 byte field into host byte order */
static inline u32 ntoh24(u8 *net)
{
	__be32 t = 0;

	memcpy(((u8 *)&t) + 1, net, 3);
	return be32_to_cpu(t);
}

#ifdef CONFIG_XFRM
static inline bool using_ipsec(struct smc_sock *smc)
{
	return smc->clcsock->sk->sk_policy[0] ||
	       smc->clcsock->sk->sk_policy[1];
}
#else
static inline bool using_ipsec(struct smc_sock *smc)
{
	return false;
}
#endif

struct smc_gidlist;

struct sock *smc_accept_dequeue(struct sock *parent, struct socket *new_sock);
void smc_close_non_accepted(struct sock *sk);
void smc_fill_gid_list(struct smc_link_group *lgr,
		       struct smc_gidlist *gidlist,
		       struct smc_ib_device *known_dev, u8 *known_gid);

/* smc handshake limitation interface for netlink */
int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb);
int smc_nl_enable_hs_limitation(struct sk_buff *skb, struct genl_info *info);
int smc_nl_disable_hs_limitation(struct sk_buff *skb, struct genl_info *info);

static inline void smc_sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	set_bit(flag, &sk->sk_flags);
}

#endif	/* __SMC_H */