#ifndef DEF_RDMAVT_INCQP_H
#define DEF_RDMAVT_INCQP_H

/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>

/*
 * Atomic bit definitions for r_aflags.
 */
#define RVT_R_WRID_VALID        0
#define RVT_R_REWIND_SGE        1

/*
 * Bit definitions for r_flags.
 */
#define RVT_R_REUSE_SGE 0x01
#define RVT_R_RDMAR_SEQ 0x02
#define RVT_R_RSP_NAK   0x04
#define RVT_R_RSP_SEND  0x08
#define RVT_R_COMM_EST  0x10
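
/*
 * Illustrative sketch, not part of this header: because r_aflags is
 * operated on with atomic bitops, the responder can hand off per-WQE
 * state without holding a lock. A hypothetical consumer looks like:
 *
 *	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
 *		rvt_example_complete_recv(qp, qp->r_wr_id);
 *
 * where rvt_example_complete_recv() is a made-up completion helper.
 * The r_flags bits above, by contrast, are plain flags protected by
 * the QP's locks.
 */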

/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if send WRs must individually request
 *		completions (the QP was created with IB_SIGNAL_REQ_WR)
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_RESP_PENDING - a response (e.g. to an RDMA read) still needs
 *		to be sent
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *		to complete before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *		before processing the next SWQE
 * RVT_S_WAIT_RNR - waiting for the RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process the next SWQE
 * RVT_S_WAIT_DMA - waiting for the send DMA queue to drain before generating
 *		the next send completion entry not via send DMA
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_PIO_DRAIN - waiting for the QP to drain PIO packets
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * RVT_S_UNLIMITED_CREDIT - credit (SSN) limiting is not applied to this QP
 * RVT_S_AHG_VALID - the cached AHG header state for this QP is valid
 * RVT_S_AHG_CLEAR - the cached AHG state should be cleared
 * RVT_S_ECN - a BECN was queued to the send engine
 */
#define RVT_S_SIGNAL_REQ_WR	0x0001
#define RVT_S_BUSY		0x0002
#define RVT_S_TIMER		0x0004
#define RVT_S_RESP_PENDING	0x0008
#define RVT_S_ACK_PENDING	0x0010
#define RVT_S_WAIT_FENCE	0x0020
#define RVT_S_WAIT_RDMAR	0x0040
#define RVT_S_WAIT_RNR		0x0080
#define RVT_S_WAIT_SSN_CREDIT	0x0100
#define RVT_S_WAIT_DMA		0x0200
#define RVT_S_WAIT_PIO		0x0400
#define RVT_S_WAIT_PIO_DRAIN	0x0800
#define RVT_S_WAIT_TX		0x1000
#define RVT_S_WAIT_DMA_DESC	0x2000
#define RVT_S_WAIT_KMEM		0x4000
#define RVT_S_WAIT_PSN		0x8000
#define RVT_S_WAIT_ACK		0x10000
#define RVT_S_SEND_ONE		0x20000
#define RVT_S_UNLIMITED_CREDIT	0x40000
#define RVT_S_AHG_VALID		0x80000
#define RVT_S_AHG_CLEAR		0x100000
#define RVT_S_ECN		0x200000

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define RVT_S_ANY_WAIT_IO (RVT_S_WAIT_PIO | RVT_S_WAIT_TX | \
			   RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |	\
			     RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT |	\
			     RVT_S_WAIT_DMA | RVT_S_WAIT_PSN |		\
			     RVT_S_WAIT_ACK)

#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)

/* Number of bits to pay attention to in the opcode for checking qp type */
#define RVT_OPCODE_QP_MASK 0xE0

/* Flags for checking QP state (see ib_rvt_state_ops[]) */
#define RVT_POST_SEND_OK                0x01
#define RVT_POST_RECV_OK                0x02
#define RVT_PROCESS_RECV_OK             0x04
#define RVT_PROCESS_SEND_OK             0x08
#define RVT_PROCESS_NEXT_SEND_OK        0x10
#define RVT_FLUSH_SEND			0x20
#define RVT_FLUSH_RECV			0x40
#define RVT_PROCESS_OR_FLUSH_SEND \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
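
/*
 * Illustrative sketch, not part of this header: drivers gate their fast
 * paths on the QP state by indexing ib_rvt_state_ops[] (declared at the
 * bottom of this file) with qp->state and testing the flags above:
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
 *		goto drop;
 *
 * where "drop" is a hypothetical error path in the caller.
 */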

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {
	union {
		struct ib_send_wr wr;   /* don't use wr.sg_list */
		struct ib_ud_wr ud_wr;
		struct ib_reg_wr reg_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};
	u32 psn;                /* first packet sequence number */
	u32 lpsn;               /* last packet sequence number */
	u32 ssn;                /* send sequence number */
	u32 length;             /* total length of data in sg_list */
	struct rvt_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct rvt_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use rvt_get_rwqe_ptr() instead.
 */
struct rvt_rwq {
	u32 head;               /* new work requests posted to the head */
	u32 tail;               /* receive WQEs are pulled from here */
	struct rvt_rwqe wq[0];
};

struct rvt_rq {
	struct rvt_rwq *wq;
	u32 size;               /* size of RWQE array */
	u8 max_sge;
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
};

/*
 * This structure is used by rvt_mmap() to validate an offset
 * when an mmap() request is made. The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct rvt_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};

#define RVT_MAX_RDMA_ATOMIC	16

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct rvt_ack_entry {
	u8 opcode;
	u8 sent;
	u32 psn;
	u32 lpsn;
	union {
		struct rvt_sge rdma_sge;
		u64 atomic_data;
	};
};

#define	RC_QP_SCALING_INTERVAL	5
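
/*
 * Illustrative sketch, not part of this header: head and tail in struct
 * rvt_rwq are ring indices into the variable-size wq[] array. Because
 * the rvt_rwq may be mmap'ed into user space, a kernel consumer should
 * validate the user-writable tail before trusting it. A hypothetical
 * consumer of a struct rvt_rq looks like:
 *
 *	u32 tail = rq->wq->tail;
 *
 *	if (tail >= rq->size)			(validate untrusted index)
 *		tail = 0;
 *	wqe = rvt_get_rwqe_ptr(rq, tail);	(defined at end of file)
 *	if (++tail >= rq->size)
 *		tail = 0;
 *	rq->wq->tail = tail;
 */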

/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by holding both r_rq.lock and s_lock,
 * in that order; both are only held together in modify_qp() or when
 * changing the QP state.
 */
struct rvt_qp {
	struct ib_qp ibqp;
	void *priv;             /* Driver private data */
	/* read mostly fields above and below */
	struct ib_ah_attr remote_ah_attr;
	struct ib_ah_attr alt_ah_attr;
	struct rvt_qp __rcu *next;      /* linked list for QPN hash table */
	struct rvt_swqe *s_wq;  /* send work queue */
	struct rvt_mmap_info *ip;

	unsigned long timeout_jiffies;  /* computed from timeout */

	enum ib_mtu path_mtu;
	int srate_mbps;		/* s_srate (below) converted to Mbit/s */
	pid_t pid;		/* pid for user mode QPs */
	u32 remote_qpn;
	u32 qkey;               /* QKEY for this QP (for UD or RD) */
	u32 s_size;             /* send work queue size */
	u32 s_ahgpsn;           /* set to the psn in the copy of the header */

	u16 pmtu;		/* decoded from path_mtu */
	u8 log_pmtu;		/* shift for pmtu */
	u8 state;               /* QP state */
	u8 allowed_ops;		/* high order bits of allowed opcodes */
	u8 qp_access_flags;
	u8 alt_timeout;         /* Alternate path timeout for this QP */
	u8 timeout;             /* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index;        /* PKEY index to use */
	u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
	u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;         /* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
	u8 s_max_sge;           /* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */
	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct rvt_ack_entry s_ack_queue[RVT_MAX_RDMA_ATOMIC + 1]
		____cacheline_aligned_in_smp;
	struct rvt_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp;      /* used for APM */
	u32 r_psn;              /* expected rcv packet sequence number */
	unsigned long r_aflags;
	u64 r_wr_id;            /* ID for current receive WQE */
	u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
	u32 r_len;              /* total length of r_sge */
	u32 r_rcv_len;          /* receive data len processed */
	u32 r_msn;              /* message sequence number */

	u8 r_state;             /* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue;    /* index into s_ack_queue[] */

	struct list_head rspwait;       /* link for waiting to respond */

	struct rvt_sge_state r_sge;     /* current receive data */
	struct rvt_rq r_rq;             /* receive work queue */

	/* post send line */
	spinlock_t s_hlock ____cacheline_aligned_in_smp;
	u32 s_head;             /* new entries added here */
	u32 s_next_psn;         /* PSN for next request */
	u32 s_avail;            /* number of entries avail */
	u32 s_ssn;              /* SSN of tail entry */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	u32 s_flags;
	struct rvt_sge_state *s_cur_sge;
	struct rvt_swqe *s_wqe;
	struct rvt_sge_state s_sge;     /* current send request data */
	struct rvt_mregion *s_rdma_mr;
	u32 s_cur_size;         /* size of send packet in bytes */
	u32 s_len;              /* total length of s_sge */
	u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
	u32 s_last_psn;         /* last response PSN processed */
	u32 s_sending_psn;      /* lowest PSN that is being sent */
	u32 s_sending_hpsn;     /* highest PSN that is being sent */
	u32 s_psn;              /* current packet sequence number */
	u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
	u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
	u32 s_tail;             /* next entry to process */
	u32 s_cur;              /* current work queue entry */
	u32 s_acked;            /* last un-ACK'ed entry */
	u32 s_last;             /* last completed entry */
	u32 s_lsn;              /* limit sequence number (credit) */
	u16 s_hdrwords;         /* size of s_hdr in 32 bit words */
	u16 s_rdma_ack_cnt;
	s8 s_ahgidx;
	u8 s_state;             /* opcode of last packet sent */
	u8 s_ack_state;         /* opcode of packet to ACK */
	u8 s_nak_state;         /* non-zero if NAK is pending */
	u8 r_nak_state;         /* non-zero if NAK is pending */
	u8 s_retry;             /* requester retry counter */
	u8 s_rnr_retry;         /* requester RNR retry counter */
	u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;    /* index into s_ack_queue[] */

	struct rvt_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;

	/*
	 * This sge list MUST be last. Do not add anything below here.
	 */
	struct rvt_sge r_sg_list[0] /* verified SGEs */
		____cacheline_aligned_in_smp;
};
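
/*
 * A minimal sketch, not part of the rdmavt API: s_head, s_tail, s_cur,
 * s_acked, and s_last above are ring indices into s_wq that wrap at
 * s_size. A hypothetical helper that advances such an index:
 */
static inline u32 rvt_example_next_swqe_index(struct rvt_qp *qp, u32 n)
{
	/* keep the index in [0, s_size) rather than letting it run free */
	if (++n >= qp->s_size)
		n = 0;
	return n;
}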

struct rvt_srq {
	struct ib_srq ibsrq;
	struct rvt_rq rq;
	struct rvt_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

#define RVT_QPN_MAX		 BIT(24)
#define RVT_QPNMAP_ENTRIES	 (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE	 (PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK	 (RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK		 0xFFFFFF

/*
 * QPN-map pages start out as NULL; they are allocated upon
 * first use and are never deallocated. This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct rvt_qpn_map {
	void *page;
};

struct rvt_qpn_table {
	spinlock_t lock; /* protect changes to the qp table */
	unsigned flags;         /* flags for QP0/1 allocated for each port */
	u32 last;               /* last QP number allocated */
	u32 nmaps;              /* size of the map table */
	u16 limit;
	u8 incr;
	/* bit map of free QP numbers other than 0/1 */
	struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};

struct rvt_qp_ibdev {
	u32 qp_table_size;
	u32 qp_table_bits;
	struct rvt_qp __rcu **qp_table;
	spinlock_t qpt_lock; /* qptable lock */
	struct rvt_qpn_table qpn_table;
};

/*
 * There is one struct rvt_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct rvt_mcast_qp.
 */
struct rvt_mcast_qp {
	struct list_head list;
	struct rvt_qp *qp;
};

struct rvt_mcast {
	struct rb_node rb_node;
	union ib_gid mgid;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/*
 * Since struct rvt_swqe is not a fixed size, we can't simply index into
 * struct rvt_qp.s_wq. This function does the array index computation.
 */
static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
						unsigned n)
{
	return (struct rvt_swqe *)((char *)qp->s_wq +
				   (sizeof(struct rvt_swqe) +
				    qp->s_max_sge *
				    sizeof(struct rvt_sge)) * n);
}
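
/*
 * A minimal usage sketch, not part of the rdmavt API: a hypothetical
 * walk of the posted send queue from the last completed entry (s_last)
 * to the newest entry (s_head), summing the posted byte counts.
 */
static inline u32 rvt_example_pending_send_bytes(struct rvt_qp *qp)
{
	u32 i = qp->s_last;
	u32 bytes = 0;

	while (i != qp->s_head) {
		/* each SWQE is variable size; never index s_wq directly */
		bytes += rvt_get_swqe_ptr(qp, i)->length;
		if (++i >= qp->s_size)
			i = 0;
	}
	return bytes;
}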

/*
 * Since struct rvt_rwqe is not a fixed size, we can't simply index into
 * struct rvt_rwq.wq. This function does the array index computation.
 */
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
	return (struct rvt_rwqe *)
		((char *)rq->wq->wq +
		 (sizeof(struct rvt_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}

extern const int ib_rvt_state_ops[];

struct rvt_dev_info;
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);

#endif          /* DEF_RDMAVT_INCQP_H */