/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2017, Microsoft Corporation.
 *
 * Author(s): Long Li <longli@microsoft.com>
 */
#ifndef _SMBDIRECT_H
#define _SMBDIRECT_H

#ifdef CONFIG_CIFS_SMB_DIRECT
#define cifs_rdma_enabled(server) ((server)->rdma)

#include "cifsglob.h"
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/mempool.h>

extern int rdma_readwrite_threshold;
extern int smbd_max_frmr_depth;
extern int smbd_keep_alive_interval;
extern int smbd_max_receive_size;
extern int smbd_max_fragmented_recv_size;
extern int smbd_max_send_size;
extern int smbd_send_credit_target;
extern int smbd_receive_credit_max;

enum keep_alive_status {
	KEEP_ALIVE_NONE,
	KEEP_ALIVE_PENDING,
	KEEP_ALIVE_SENT,
};

enum smbd_connection_status {
	SMBD_CREATED,
	SMBD_CONNECTING,
	SMBD_CONNECTED,
	SMBD_NEGOTIATE_FAILED,
	SMBD_DISCONNECTING,
	SMBD_DISCONNECTED,
	SMBD_DESTROYED
};

/*
 * The context for the SMBDirect transport
 * Everything related to the transport is here. It has several logical parts
 * 1. RDMA related structures
 * 2. SMBDirect connection parameters
 * 3. Memory registrations
 * 4. Receive and reassembly queues for data receive path
 * 5. mempools for allocating packets
 */
struct smbd_connection {
	enum smbd_connection_status transport_status;

	/* RDMA related */
	struct rdma_cm_id *id;
	struct ib_qp_init_attr qp_attr;
	struct ib_pd *pd;
	struct ib_cq *send_cq, *recv_cq;
	struct ib_device_attr dev_attr;
	int ri_rc;
	struct completion ri_done;
	wait_queue_head_t conn_wait;
	wait_queue_head_t disconn_wait;

	struct completion negotiate_completion;
	bool negotiate_done;

	struct work_struct disconnect_work;
	struct work_struct post_send_credits_work;

	spinlock_t lock_new_credits_offered;
	int new_credits_offered;

	/* Connection parameters defined in [MS-SMBD] 3.1.1.1 */
	int receive_credit_max;
	int send_credit_target;
	int max_send_size;
	int max_fragmented_recv_size;
	int max_fragmented_send_size;
	int max_receive_size;
	int keep_alive_interval;
	int max_readwrite_size;
	enum keep_alive_status keep_alive_requested;
	int protocol;
	atomic_t send_credits;
	atomic_t receive_credits;
	int receive_credit_target;
	int fragment_reassembly_remaining;

	/* Memory registrations */
	/* Maximum number of RDMA read/write outstanding on this connection */
	int responder_resources;
	/* Maximum number of pages in a single RDMA write/read on this connection */
	int max_frmr_depth;
	/*
	 * If payload is less than or equal to the threshold,
	 * use RDMA send/recv to send upper layer I/O.
	 * If payload is more than the threshold,
	 * use RDMA read/write through memory registration for I/O.
	 */
	int rdma_readwrite_threshold;
	enum ib_mr_type mr_type;
	struct list_head mr_list;
	spinlock_t mr_list_lock;
	/* The number of available MRs ready for memory registration */
	atomic_t mr_ready_count;
	atomic_t mr_used_count;
	wait_queue_head_t wait_mr;
	struct work_struct mr_recovery_work;
	/* Used by transport to wait until all MRs are returned */
	wait_queue_head_t wait_for_mr_cleanup;

	/* Activity accounting */
	atomic_t send_pending;
	wait_queue_head_t wait_send_pending;
	wait_queue_head_t wait_post_send;

	/* Receive queue */
	struct list_head receive_queue;
	int count_receive_queue;
	spinlock_t receive_queue_lock;

	struct list_head empty_packet_queue;
	int count_empty_packet_queue;
	spinlock_t empty_packet_queue_lock;

	wait_queue_head_t wait_receive_queues;

	/* Reassembly queue */
	struct list_head reassembly_queue;
	spinlock_t reassembly_queue_lock;
	wait_queue_head_t wait_reassembly_queue;

	/* total data length of reassembly queue */
	int reassembly_data_length;
	int reassembly_queue_length;
	/* the offset to first buffer in reassembly queue */
	int first_entry_offset;

	bool send_immediate;

	wait_queue_head_t wait_send_queue;

	/*
	 * Indicate if we have received a full packet on the connection.
	 * This is used to identify the first SMBD packet of an assembled
	 * payload (SMB packet) in the reassembly queue, so we can return an
	 * RFC1002 length to the upper layer to indicate the length of the
	 * SMB packet received.
	 */
	bool full_packet_received;

	struct workqueue_struct *workqueue;
	struct delayed_work idle_timer_work;

	/* Memory pool for preallocating buffers */
	/* request pool for RDMA send */
	struct kmem_cache *request_cache;
	mempool_t *request_mempool;

	/* response pool for RDMA receive */
	struct kmem_cache *response_cache;
	mempool_t *response_mempool;

	/* for debug purposes */
	unsigned int count_get_receive_buffer;
	unsigned int count_put_receive_buffer;
	unsigned int count_reassembly_queue;
	unsigned int count_enqueue_reassembly_queue;
	unsigned int count_dequeue_reassembly_queue;
	unsigned int count_send_empty;
};

enum smbd_message_type {
	SMBD_NEGOTIATE_RESP,
	SMBD_TRANSFER_DATA,
};

#define SMB_DIRECT_RESPONSE_REQUESTED 0x0001

/* SMBD negotiation request packet [MS-SMBD] 2.2.1 */
struct smbd_negotiate_req {
	__le16 min_version;
	__le16 max_version;
	__le16 reserved;
	__le16 credits_requested;
	__le32 preferred_send_size;
	__le32 max_receive_size;
	__le32 max_fragmented_size;
} __packed;

/* SMBD negotiation response packet [MS-SMBD] 2.2.2 */
struct smbd_negotiate_resp {
	__le16 min_version;
	__le16 max_version;
	__le16 negotiated_version;
	__le16 reserved;
	__le16 credits_requested;
	__le16 credits_granted;
	__le32 status;
	__le32 max_readwrite_size;
	__le32 preferred_send_size;
	__le32 max_receive_size;
	__le32 max_fragmented_size;
} __packed;

/* SMBD data transfer packet with payload [MS-SMBD] 2.2.3 */
struct smbd_data_transfer {
	__le16 credits_requested;
	__le16 credits_granted;
	__le16 flags;
	__le16 reserved;
	__le32 remaining_data_length;
	__le32 data_offset;
	__le32 data_length;
	__le32 padding;
	__u8 buffer[];
} __packed;
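
/*
 * Illustrative sketch only, not part of the upstream interface: locate the
 * payload carried by a received smbd_data_transfer packet and report whether
 * it is the last fragment of the upper-layer (SMB) message.  The helper name
 * is hypothetical, and it assumes the caller has already validated that
 * data_offset and data_length lie within the received buffer.
 */
static inline void *smbd_example_data_payload(struct smbd_data_transfer *packet,
					      u32 *length, bool *last_fragment)
{
	*length = le32_to_cpu(packet->data_length);

	/* remaining_data_length is zero on the final fragment of a message */
	*last_fragment = le32_to_cpu(packet->remaining_data_length) == 0;

	/* data_offset is measured from the start of the SMBD header */
	return (u8 *)packet + le32_to_cpu(packet->data_offset);
}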

/* The packet fields for a registered RDMA buffer */
struct smbd_buffer_descriptor_v1 {
	__le64 offset;
	__le32 token;
	__le32 length;
} __packed;

/* Maximum number of SGEs used by smbdirect.c in any send work request */
#define SMBDIRECT_MAX_SEND_SGE 6

/* The context for an SMBD request */
struct smbd_request {
	struct smbd_connection *info;
	struct ib_cqe cqe;

	/* the SGE entries for this work request */
	struct ib_sge sge[SMBDIRECT_MAX_SEND_SGE];
	int num_sge;

	/* SMBD packet header follows this structure */
	u8 packet[];
};

/* Maximum number of SGEs used by smbdirect.c in any receive work request */
#define SMBDIRECT_MAX_RECV_SGE 1

/* The context for an SMBD response */
struct smbd_response {
	struct smbd_connection *info;
	struct ib_cqe cqe;
	struct ib_sge sge;

	enum smbd_message_type type;

	/* Link to receive queue or reassembly queue */
	struct list_head list;

	/* Indicate if this is the first packet of a payload */
	bool first_segment;

	/* SMBD packet header and payload follow this structure */
	u8 packet[];
};

/* Create an SMBDirect session */
struct smbd_connection *smbd_get_connection(
	struct TCP_Server_Info *server, struct sockaddr *dstaddr);

/* Reconnect an SMBDirect session */
int smbd_reconnect(struct TCP_Server_Info *server);
/* Destroy an SMBDirect session */
void smbd_destroy(struct TCP_Server_Info *server);

/* Interface for carrying upper layer I/O through send/recv */
int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
int smbd_send(struct TCP_Server_Info *server,
	int num_rqst, struct smb_rqst *rqst);

enum mr_state {
	MR_READY,
	MR_REGISTERED,
	MR_INVALIDATED,
	MR_ERROR
};

struct smbd_mr {
	struct smbd_connection *conn;
	struct list_head list;
	enum mr_state state;
	struct ib_mr *mr;
	struct sg_table sgt;
	enum dma_data_direction dir;
	union {
		struct ib_reg_wr wr;
		struct ib_send_wr inv_wr;
	};
	struct ib_cqe cqe;
	bool need_invalidate;
	struct completion invalidate_done;
};

/* Interfaces to register and deregister MR for RDMA read/write */
struct smbd_mr *smbd_register_mr(
	struct smbd_connection *info, struct iov_iter *iter,
	bool writing, bool need_invalidate);
int smbd_deregister_mr(struct smbd_mr *mr);

#else
#define cifs_rdma_enabled(server) 0
struct smbd_connection {};
static inline void *smbd_get_connection(
	struct TCP_Server_Info *server, struct sockaddr *dstaddr) {return NULL;}
static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; }
static inline void smbd_destroy(struct TCP_Server_Info *server) {}
static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; }
static inline int smbd_send(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst) {return -1; }
#endif

#endif
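
/*
 * Usage sketch (illustrative only; names outside this header, such as "v1"
 * and "iter", are hypothetical): a caller that moves upper-layer data by
 * RDMA read/write is expected to register the memory, describe it to the
 * peer with an smbd_buffer_descriptor_v1, and deregister the MR once the
 * peer has finished the transfer:
 *
 *	struct smbd_mr *mr;
 *
 *	mr = smbd_register_mr(info, &iter, true, need_invalidate);
 *	if (!mr)
 *		return -EAGAIN;
 *
 *	// v1 points into the request buffer that will be sent to the peer
 *	v1->offset = cpu_to_le64(mr->mr->iova);
 *	v1->token = cpu_to_le32(mr->mr->rkey);
 *	v1->length = cpu_to_le32(mr->mr->length);
 *
 *	// ... send the request carrying the descriptor and wait for the
 *	// peer to complete the RDMA transfer ...
 *
 *	smbd_deregister_mr(mr);
 */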