#ifndef DEF_RDMAVT_INCQP_H
#define DEF_RDMAVT_INCQP_H

/*
 * Copyright(c) 2016 - 2019 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdmavt_cq.h>
#include <rdma/rvt-abi.h>

/*
 * Atomic bit definitions for r_aflags.
 */
#define RVT_R_WRID_VALID	0
#define RVT_R_REWIND_SGE	1

/*
 * Bit definitions for r_flags.
 */
#define RVT_R_REUSE_SGE	0x01
#define RVT_R_RDMAR_SEQ	0x02
#define RVT_R_RSP_NAK	0x04
#define RVT_R_RSP_SEND	0x08
#define RVT_R_COMM_EST	0x10
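/*
 * Illustrative sketch, not part of this header: because r_aflags is only
 * ever manipulated with atomic bitops, a driver can claim the current
 * receive WQE without holding a lock. The surrounding code is
 * hypothetical:
 *
 *	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
 *		... complete the WQE identified by qp->r_wr_id ...
 */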
/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                    before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete
 *                    before processing the next SWQE
 * RVT_S_WAIT_RNR - waiting for RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  next send completion entry not via send DMA
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * RVT_S_ECN - a BECN was queued to the send engine
 * RVT_S_MAX_BIT_MASK - the max bit that can be used by rdmavt
 */
#define RVT_S_SIGNAL_REQ_WR	0x0001
#define RVT_S_BUSY		0x0002
#define RVT_S_TIMER		0x0004
#define RVT_S_RESP_PENDING	0x0008
#define RVT_S_ACK_PENDING	0x0010
#define RVT_S_WAIT_FENCE	0x0020
#define RVT_S_WAIT_RDMAR	0x0040
#define RVT_S_WAIT_RNR		0x0080
#define RVT_S_WAIT_SSN_CREDIT	0x0100
#define RVT_S_WAIT_DMA		0x0200
#define RVT_S_WAIT_PIO		0x0400
#define RVT_S_WAIT_TX		0x0800
#define RVT_S_WAIT_DMA_DESC	0x1000
#define RVT_S_WAIT_KMEM		0x2000
#define RVT_S_WAIT_PSN		0x4000
#define RVT_S_WAIT_ACK		0x8000
#define RVT_S_SEND_ONE		0x10000
#define RVT_S_UNLIMITED_CREDIT	0x20000
#define RVT_S_ECN		0x40000
#define RVT_S_MAX_BIT_MASK	0x800000

/*
 * Drivers should use s_flags starting with bit 31 down to the bit next to
 * RVT_S_MAX_BIT_MASK.
 */

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define RVT_S_ANY_WAIT_IO \
	(RVT_S_WAIT_PIO | RVT_S_WAIT_TX | \
	 RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
	RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
	RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
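/*
 * Illustrative sketch, not part of this header: a driver's send engine
 * typically gates progress on these masks while holding s_lock, e.g. a
 * hypothetical scheduling check might read
 *
 *	if (!(qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)))
 *		... mark the QP busy and kick the send engine ...
 *
 * so a QP that is already busy or waiting on any resource is skipped.
 */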
/* Number of bits to pay attention to in the opcode for checking qp type */
#define RVT_OPCODE_QP_MASK	0xE0

/* Flags for checking QP state (see ib_rvt_state_ops[]) */
#define RVT_POST_SEND_OK		0x01
#define RVT_POST_RECV_OK		0x02
#define RVT_PROCESS_RECV_OK		0x04
#define RVT_PROCESS_SEND_OK		0x08
#define RVT_PROCESS_NEXT_SEND_OK	0x10
#define RVT_FLUSH_SEND			0x20
#define RVT_FLUSH_RECV			0x40
#define RVT_PROCESS_OR_FLUSH_SEND \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
#define RVT_SEND_OR_FLUSH_OR_RECV_OK \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND | RVT_PROCESS_RECV_OK)

/*
 * Internal send flags
 */
#define RVT_SEND_RESERVE_USED		IB_SEND_RESERVED_START
#define RVT_SEND_COMPLETION_ONLY	(IB_SEND_RESERVED_START << 1)

/**
 * struct rvt_ud_wr - IB UD work plus AH cache
 * @wr: valid IB work request
 * @attr: pointer to an allocated AH attribute
 *
 * Special case the UD WR so we can keep track of the AH attributes.
 *
 * NOTE: This data structure is strictly ordered wr then attr, i.e. the attr
 * MUST come after wr.  The ib_ud_wr is sized and copied in rvt_post_one_wr.
 * The copy assumes that wr is first.
 */
struct rvt_ud_wr {
	struct ib_ud_wr wr;
	struct rdma_ah_attr *attr;
};

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {
	union {
		struct ib_send_wr wr;   /* don't use wr.sg_list */
		struct rvt_ud_wr ud_wr;
		struct ib_reg_wr reg_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};
	u32 psn;                /* first packet sequence number */
	u32 lpsn;               /* last packet sequence number */
	u32 ssn;                /* send sequence number */
	u32 length;             /* total length of data in sg_list */
	void *priv;             /* driver dependent field */
	struct rvt_sge sg_list[0];
};

/**
 * struct rvt_krwq - kernel receive work request queue
 * @p_lock: lock to protect producer of the kernel buffer
 * @head: index of next entry to fill
 * @c_lock: lock to protect consumer of the kernel buffer
 * @tail: index of next entry to pull
 * @count: approximate count of total receive entries posted
 * @wq: the receive work request queue entries
 *
 * This structure holds the head pointer, tail pointer,
 * and receive work queue entries for kernel mode users.
 */
struct rvt_krwq {
	spinlock_t p_lock;	/* protect producer */
	u32 head;               /* new work requests posted to the head */

	/* protect consumer */
	spinlock_t c_lock ____cacheline_aligned_in_smp;
	u32 tail;               /* receives pull requests from here */
	u32 count;		/* approx count of receive entries posted */
	struct rvt_rwqe *curr_wq;
	struct rvt_rwqe wq[];
};
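/*
 * Illustrative sketch, not part of this header: the producer and the
 * consumer of a kernel receive queue are serialized independently, so a
 * post-receive path and a completion path can run concurrently. A
 * hypothetical post path, with full-queue handling elided:
 *
 *	spin_lock_irqsave(&rq->kwq->p_lock, flags);
 *	wqe = rvt_get_rwqe_ptr(rq, rq->kwq->head);
 *	... fill wqe, then advance head with wraparound ...
 *	spin_unlock_irqrestore(&rq->kwq->p_lock, flags);
 *
 * The consumer side takes c_lock and advances tail the same way.
 */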
/**
 * rvt_get_swqe_ah - return the pointer to the struct rvt_ah
 * @swqe: valid Send WQE
 */
static inline struct rvt_ah *rvt_get_swqe_ah(struct rvt_swqe *swqe)
{
	return ibah_to_rvtah(swqe->ud_wr.wr.ah);
}

/**
 * rvt_get_swqe_ah_attr - return the cached ah attribute information
 * @swqe: valid Send WQE
 */
static inline struct rdma_ah_attr *rvt_get_swqe_ah_attr(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.attr;
}

/**
 * rvt_get_swqe_remote_qpn - access the remote QPN value
 * @swqe: valid Send WQE
 */
static inline u32 rvt_get_swqe_remote_qpn(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.wr.remote_qpn;
}

/**
 * rvt_get_swqe_remote_qkey - access the remote qkey value
 * @swqe: valid Send WQE
 */
static inline u32 rvt_get_swqe_remote_qkey(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.wr.remote_qkey;
}

/**
 * rvt_get_swqe_pkey_index - access the pkey index
 * @swqe: valid Send WQE
 */
static inline u16 rvt_get_swqe_pkey_index(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.wr.pkey_index;
}

struct rvt_rq {
	struct rvt_rwq *wq;
	struct rvt_krwq *kwq;
	u32 size;               /* size of RWQE array */
	u8 max_sge;
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send a RDMA read response or atomic operation.
 */
struct rvt_ack_entry {
	struct rvt_sge rdma_sge;
	u64 atomic_data;
	u32 psn;
	u32 lpsn;
	u8 opcode;
	u8 sent;
	void *priv;
};

#define RC_QP_SCALING_INTERVAL	5

#define RVT_OPERATION_PRIV        0x00000001
#define RVT_OPERATION_ATOMIC      0x00000002
#define RVT_OPERATION_ATOMIC_SGE  0x00000004
#define RVT_OPERATION_LOCAL       0x00000008
#define RVT_OPERATION_USE_RESERVE 0x00000010
#define RVT_OPERATION_IGN_RNR_CNT 0x00000020

#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)

/**
 * struct rvt_operation_params - op table entry
 * @length: the length to copy into the swqe entry
 * @qpt_support: a bit mask indicating QP type support
 * @flags: RVT_OPERATION flags (see above)
 *
 * This supports table-driven post send so that
 * drivers can support potentially different sets
 * of operations; see the sketch below for an
 * example entry.
 */
struct rvt_operation_params {
	size_t length;
	u32 qpt_support;
	u32 flags;
};
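/*
 * Illustrative sketch, not part of this header: a driver builds an array
 * of these entries indexed by work-request opcode and hands it to rdmavt
 * for table-driven post send. The entry below is a plausible example for
 * RDMA WRITE on UC/RC QPs, not a definitive table:
 *
 *	[IB_WR_RDMA_WRITE] = {
 *		.length = sizeof(struct ib_rdma_wr),
 *		.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
 *	},
 */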
/*
 * Common variables are protected by both r_rq.lock and s_lock in that order
 * which only happens in modify_qp() or changing the QP 'state'.
 */
struct rvt_qp {
	struct ib_qp ibqp;
	void *priv; /* Driver private data */
	/* read mostly fields above and below */
	struct rdma_ah_attr remote_ah_attr;
	struct rdma_ah_attr alt_ah_attr;
	struct rvt_qp __rcu *next;      /* link list for QPN hash table */
	struct rvt_swqe *s_wq;  /* send work queue */
	struct rvt_mmap_info *ip;

	unsigned long timeout_jiffies;  /* computed from timeout */

	int srate_mbps;		/* s_srate (below) converted to Mbit/s */
	pid_t pid;		/* pid for user mode QPs */
	u32 remote_qpn;
	u32 qkey;               /* QKEY for this QP (for UD or RD) */
	u32 s_size;             /* send work queue size */

	u16 pmtu;		/* decoded from path_mtu */
	u8 log_pmtu;		/* shift for pmtu */
	u8 state;               /* QP state */
	u8 allowed_ops;		/* high order bits of allowed opcodes */
	u8 qp_access_flags;
	u8 alt_timeout;         /* Alternate path timeout for this QP */
	u8 timeout;             /* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index;        /* PKEY index to use */
	u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
	u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;         /* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
	u8 s_max_sge;           /* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */
	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct rvt_ack_entry *s_ack_queue;
	struct rvt_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp;      /* used for APM */
	u32 r_psn;              /* expected rcv packet sequence number */
	unsigned long r_aflags;
	u64 r_wr_id;            /* ID for current receive WQE */
	u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
	u32 r_len;              /* total length of r_sge */
	u32 r_rcv_len;          /* receive data len processed */
	u32 r_msn;              /* message sequence number */

	u8 r_state;             /* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue;    /* index into s_ack_queue[] */
	u8 r_adefered;          /* deferred ack count */

	struct list_head rspwait;       /* link for waiting to respond */

	struct rvt_sge_state r_sge;     /* current receive data */
	struct rvt_rq r_rq;             /* receive work queue */

	/* post send line */
	spinlock_t s_hlock ____cacheline_aligned_in_smp;
	u32 s_head;             /* new entries added here */
	u32 s_next_psn;         /* PSN for next request */
	u32 s_avail;            /* number of entries avail */
	u32 s_ssn;              /* SSN of tail entry */
	atomic_t s_reserved_used; /* reserved entries in use */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	u32 s_flags;
	struct rvt_sge_state *s_cur_sge;
	struct rvt_swqe *s_wqe;
	struct rvt_sge_state s_sge;     /* current send request data */
	struct rvt_mregion *s_rdma_mr;
	u32 s_len;              /* total length of s_sge */
	u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
	u32 s_last_psn;         /* last response PSN processed */
	u32 s_sending_psn;      /* lowest PSN that is being sent */
	u32 s_sending_hpsn;     /* highest PSN that is being sent */
	u32 s_psn;              /* current packet sequence number */
	u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
	u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
	u32 s_tail;             /* next entry to process */
	u32 s_cur;              /* current work queue entry */
	u32 s_acked;            /* last un-ACK'ed entry */
	u32 s_last;             /* last completed entry */
	u32 s_lsn;              /* limit sequence number (credit) */
	u32 s_ahgpsn;           /* set to the psn in the copy of the header */
	u16 s_cur_size;         /* size of send packet in bytes */
	u16 s_rdma_ack_cnt;
	u8 s_hdrwords;          /* size of s_hdr in 32 bit words */
	s8 s_ahgidx;
	u8 s_state;             /* opcode of last packet sent */
	u8 s_ack_state;         /* opcode of packet to ACK */
	u8 s_nak_state;         /* non-zero if NAK is pending */
	u8 r_nak_state;         /* non-zero if NAK is pending */
	u8 s_retry;             /* requester retry counter */
	u8 s_rnr_retry;         /* requester RNR retry counter */
	u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;    /* index into s_ack_queue[] */
	u8 s_acked_ack_queue;   /* index into s_ack_queue[] */

	struct rvt_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;
	struct hrtimer s_rnr_timer;

	atomic_t local_ops_pending; /* number of fast_reg/local_inv reqs */

	/*
	 * This sge list MUST be last. Do not add anything below here.
	 */
	struct rvt_sge r_sg_list[0] /* verified SGEs */
		____cacheline_aligned_in_smp;
};
struct rvt_srq {
	struct ib_srq ibsrq;
	struct rvt_rq rq;
	struct rvt_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct rvt_srq, ibsrq);
}

static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct rvt_qp, ibqp);
}

#define RVT_QPN_MAX		BIT(24)
#define RVT_QPNMAP_ENTRIES	(RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE	(PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK	(RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK		IB_QPN_MASK

/*
 * QPN-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct rvt_qpn_map {
	void *page;
};

struct rvt_qpn_table {
	spinlock_t lock; /* protect changes to the qp table */
	unsigned flags;         /* flags for QP0/1 allocated for each port */
	u32 last;               /* last QP number allocated */
	u32 nmaps;              /* size of the map table */
	u16 limit;
	u8  incr;
	/* bit map of free QP numbers other than 0/1 */
	struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};

struct rvt_qp_ibdev {
	u32 qp_table_size;
	u32 qp_table_bits;
	struct rvt_qp __rcu **qp_table;
	spinlock_t qpt_lock; /* qptable lock */
	struct rvt_qpn_table qpn_table;
};

/*
 * There is one struct rvt_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct rvt_mcast_qp.
 */
struct rvt_mcast_qp {
	struct list_head list;
	struct rvt_qp *qp;
};

struct rvt_mcast_addr {
	union ib_gid mgid;
	u16 lid;
};

struct rvt_mcast {
	struct rb_node rb_node;
	struct rvt_mcast_addr mcast_addr;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/*
 * Since struct rvt_swqe is not a fixed size, we can't simply index into
 * struct rvt_qp.s_wq.  This function does the array index computation.
 */
static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
						unsigned n)
{
	return (struct rvt_swqe *)((char *)qp->s_wq +
				   (sizeof(struct rvt_swqe) +
				    qp->s_max_sge *
				    sizeof(struct rvt_sge)) * n);
}

/*
 * Since struct rvt_rwqe is not a fixed size, we can't simply index into
 * struct rvt_rwq.wq.  This function does the array index computation.
 */
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
	return (struct rvt_rwqe *)
		((char *)rq->kwq->curr_wq +
		 (sizeof(struct rvt_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}
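/*
 * Illustrative sketch, not part of this header: because each SWQE is
 * sizeof(struct rvt_swqe) plus s_max_sge trailing SGEs, ring walks must
 * go through rvt_get_swqe_ptr() rather than plain array indexing. The
 * helper name is hypothetical; real callers hold s_lock.
 */
static inline void rvt_example_walk_unacked(struct rvt_qp *qp,
					    void (*fn)(struct rvt_swqe *wqe))
{
	u32 i = qp->s_acked;

	while (i != qp->s_head) {
		fn(rvt_get_swqe_ptr(qp, i));
		if (++i >= qp->s_size)
			i = 0;	/* wrap the ring index */
	}
}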
/**
 * rvt_is_user_qp - return if this is user mode QP
 * @qp: the target QP
 */
static inline bool rvt_is_user_qp(struct rvt_qp *qp)
{
	return !!qp->pid;
}

/**
 * rvt_get_qp - get a QP reference
 * @qp: the QP to hold
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{
	atomic_inc(&qp->refcount);
}

/**
 * rvt_put_qp - release a QP reference
 * @qp: the QP to release
 */
static inline void rvt_put_qp(struct rvt_qp *qp)
{
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

/**
 * rvt_put_swqe - drop mr refs held by swqe
 * @wqe: the send wqe
 *
 * This drops any mr references held by the swqe.
 */
static inline void rvt_put_swqe(struct rvt_swqe *wqe)
{
	int i;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		rvt_put_mr(sge->mr);
	}
}

/**
 * rvt_qp_wqe_reserve - reserve operation
 * @qp: the rvt qp
 * @wqe: the send wqe
 *
 * This routine is used in post send to record
 * a wqe relative reserved operation use.
 */
static inline void rvt_qp_wqe_reserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	atomic_inc(&qp->s_reserved_used);
}

/**
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp: the rvt qp
 * @wqe: the send wqe
 *
 * This decrements the reserve use count.
 *
 * This call MUST precede the change to
 * s_last to ensure that post send sees a stable
 * s_avail.
 *
 * An smp_mb__after_atomic() is used to ensure
 * the compiler does not juggle the order of the s_last
 * ring index and the decrementing of s_reserved_used.
 */
static inline void rvt_qp_wqe_unreserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
		atomic_dec(&qp->s_reserved_used);
		/* ensure no compiler re-order up to s_last change */
		smp_mb__after_atomic();
	}
}

extern const enum ib_wc_opcode ib_rvt_wc_opcode[];

/*
 * Compare the lower 24 bits of the msn values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int rvt_cmp_msn(u32 a, u32 b)
{
	return (((int)a) - ((int)b)) << 8;
}
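/*
 * Worked example: the left shift drops the high 8 bits of the 32-bit
 * difference and puts bit 23 of the 24-bit difference in the sign bit,
 * making the comparison circular in 24 bits. For instance,
 * rvt_cmp_msn(0x000001, 0xffffff) > 0, because 0x000001 is two steps
 * ahead of 0xffffff modulo 2^24.
 */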
/**
 * rvt_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 rvt_compute_aeth(struct rvt_qp *qp);

/**
 * rvt_get_credit - flush the send work queue of a QP
 * @qp: the qp whose send work queue to flush
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void rvt_get_credit(struct rvt_qp *qp, u32 aeth);

/**
 * rvt_restart_sge - rewind the sge state for a wqe
 * @ss: the sge state pointer
 * @wqe: the wqe to rewind
 * @len: the data length from the start of the wqe in bytes
 *
 * Returns the remaining data length.
 */
u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);

/**
 * rvt_div_round_up_mtu - round up divide
 * @qp: the qp pair
 * @len: the length
 *
 * Perform a shift based mtu round up divide.
 */
static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
{
	return (len + qp->pmtu - 1) >> qp->log_pmtu;
}

/**
 * rvt_div_mtu - divide by mtu
 * @qp: the qp pair
 * @len: the length
 *
 * Perform a shift based mtu divide.
 */
static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
{
	return len >> qp->log_pmtu;
}

/**
 * rvt_timeout_to_jiffies - convert a ULP timeout input into jiffies
 * @timeout: timeout input (0 - 31)
 *
 * Return a timeout value in jiffies.
 */
static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
{
	if (timeout > 31)
		timeout = 31;

	return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
}
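/*
 * Worked example: the IBTA local ACK timeout is 4.096 usec * 2^timeout,
 * so the helper scales a power-of-two microsecond count by 4096/1000 to
 * apply the 4.096 usec time base. For timeout = 14 this is
 *
 *	usecs_to_jiffies(1U << 14) * 4096 / 1000
 *
 * i.e. 4.096 us * 16384 ~= 67 ms worth of jiffies.
 */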
/**
 * rvt_lookup_qpn - return the QP with the given QPN
 * @rdi: the rvt device info
 * @rvp: the ibport
 * @qpn: the QP number to look up
 *
 * The caller must hold the rcu_read_lock(), and keep the lock until
 * the returned qp is no longer in use.
 */
static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
					    struct rvt_ibport *rvp,
					    u32 qpn) __must_hold(RCU)
{
	struct rvt_qp *qp = NULL;

	if (unlikely(qpn <= 1)) {
		qp = rcu_dereference(rvp->qp[qpn]);
	} else {
		u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);

		for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn)
				break;
	}
	return qp;
}
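/*
 * Illustrative sketch, not part of this header: a packet-receive path
 * resolves the destination QPN under RCU and must take a reference
 * before dropping the read lock if the QP is used afterwards. The
 * helper name is hypothetical.
 */
static inline struct rvt_qp *rvt_example_get_qp(struct rvt_dev_info *rdi,
						struct rvt_ibport *rvp,
						u32 qpn)
{
	struct rvt_qp *qp;

	rcu_read_lock();
	qp = rvt_lookup_qpn(rdi, rvp, qpn);
	if (qp)
		rvt_get_qp(qp);	/* paired with rvt_put_qp() by the caller */
	rcu_read_unlock();
	return qp;
}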
/**
 * rvt_mod_retry_timer_ext - mod a retry timer
 * @qp: the QP
 * @shift: timeout shift to wait for multiple packets
 *
 * Modify a potentially already running retry timer.
 */
static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
		  (qp->timeout_jiffies << shift));
}

static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
{
	return rvt_mod_retry_timer_ext(qp, 0);
}

/**
 * rvt_put_qp_swqe - drop refs held by swqe
 * @qp: the send qp
 * @wqe: the send wqe
 *
 * This drops any references held by the swqe.
 */
static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	rvt_put_swqe(wqe);
	if (qp->allowed_ops == IB_OPCODE_UD)
		rdma_destroy_ah_attr(wqe->ud_wr.attr);
}

/**
 * rvt_qp_swqe_incr - increment ring index
 * @qp: the qp
 * @val: the starting value
 *
 * Return: the new value wrapping as appropriate
 */
static inline u32
rvt_qp_swqe_incr(struct rvt_qp *qp, u32 val)
{
	if (++val >= qp->s_size)
		val = 0;
	return val;
}

int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);

/**
 * rvt_recv_cq - add a new entry to the completion queue
 *		 for the receive queue
 * @qp: receive queue
 * @wc: work completion entry to add
 * @solicited: true if @wc is solicited
 *
 * This is a wrapper for rvt_cq_enter() called by the
 * receive queue. If rvt_cq_enter() returns false, the cq is
 * full and the qp is put into the error state.
 */
static inline void rvt_recv_cq(struct rvt_qp *qp, struct ib_wc *wc,
			       bool solicited)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.recv_cq);

	if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
		rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
}

/**
 * rvt_send_cq - add a new entry to the completion queue
 *		 for the send queue
 * @qp: send queue
 * @wc: work completion entry to add
 * @solicited: true if @wc is solicited
 *
 * This is a wrapper for rvt_cq_enter() called by the
 * send queue. If rvt_cq_enter() returns false, the cq is
 * full and the qp is put into the error state.
 */
static inline void rvt_send_cq(struct rvt_qp *qp, struct ib_wc *wc,
			       bool solicited)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);

	if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
		rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
}

/**
 * rvt_qp_complete_swqe - insert send completion
 * @qp: the qp
 * @wqe: the send wqe
 * @opcode: wc operation (driver dependent)
 * @status: completion status
 *
 * Update the s_last information, and then insert a send
 * completion into the completion
 * queue if the qp indicates it should be done.
 *
 * See IBTA 10.7.3.1 for info on completion
 * control.
 *
 * Return: new last
 */
static inline u32
rvt_qp_complete_swqe(struct rvt_qp *qp,
		     struct rvt_swqe *wqe,
		     enum ib_wc_opcode opcode,
		     enum ib_wc_status status)
{
	bool need_completion;
	u64 wr_id;
	u32 byte_len, last;
	int flags = wqe->wr.send_flags;

	rvt_put_qp_swqe(qp, wqe);

	need_completion =
		!(flags & RVT_SEND_RESERVE_USED) &&
		(!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
		(flags & IB_SEND_SIGNALED) ||
		status != IB_WC_SUCCESS);
	if (need_completion) {
		wr_id = wqe->wr.wr_id;
		byte_len = wqe->length;
		/* above fields required before writing s_last */
	}
	last = rvt_qp_swqe_incr(qp, qp->s_last);
	/* see rvt_qp_is_avail() */
	smp_store_release(&qp->s_last, last);
	if (need_completion) {
		struct ib_wc w = {
			.wr_id = wr_id,
			.status = status,
			.opcode = opcode,
			.qp = &qp->ibqp,
			.byte_len = byte_len,
		};

		rvt_send_cq(qp, &w, status != IB_WC_SUCCESS);
	}
	return last;
}

extern const int ib_rvt_state_ops[];

struct rvt_dev_info;
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
void rvt_comm_est(struct rvt_qp *qp);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
unsigned long rvt_rnr_tbl_to_usec(u32 index);
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift);

static inline void rvt_add_retry_timer(struct rvt_qp *qp)
{
	rvt_add_retry_timer_ext(qp, 0);
}

void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
		  void *data, u32 length,
		  bool release, bool copy_last);
void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status);
void rvt_ruc_loopback(struct rvt_qp *qp);

/**
 * struct rvt_qp_iter - the iterator for QPs
 * @qp: the current QP
 *
 * This structure defines the current iterator
 * state for sequenced access to all QPs relative
 * to an rvt_dev_info.
 */
struct rvt_qp_iter {
	struct rvt_qp *qp;
	/* private: backpointer */
	struct rvt_dev_info *rdi;
	/* private: callback routine */
	void (*cb)(struct rvt_qp *qp, u64 v);
	/* private: for arg to callback routine */
	u64 v;
	/* private: number of SMI,GSI QPs for device */
	int specials;
	/* private: current iterator index */
	int n;
};

/**
 * ib_cq_tail - return the tail index of the cq buffer
 * @send_cq: the cq for send
 *
 * This is called in qp_iter_print to get the tail
 * of the cq buffer.
 */
static inline u32 ib_cq_tail(struct ib_cq *send_cq)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);

	return cq->ip ? RDMA_READ_UAPI_ATOMIC(cq->queue->tail) :
			cq->kqueue->tail;
}

/**
 * ib_cq_head - return the head index of the cq buffer
 * @send_cq: the cq for send
 *
 * This is called in qp_iter_print to get the head
 * of the cq buffer.
 */
static inline u32 ib_cq_head(struct ib_cq *send_cq)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);

	return cq->ip ? RDMA_READ_UAPI_ATOMIC(cq->queue->head) :
			cq->kqueue->head;
}
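/*
 * Illustrative sketch, not part of this header: rvt_qp_iter() (declared
 * below) drives a callback over every QP on a device. A hypothetical
 * debug dump might look like:
 *
 *	static void example_show_qp(struct rvt_qp *qp, u64 v)
 *	{
 *		pr_info("qpn 0x%x state %u\n", qp->ibqp.qp_num, qp->state);
 *	}
 *	...
 *	rvt_qp_iter(rdi, 0, example_show_qp);
 */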
/**
 * rvt_free_rq - free memory allocated for rvt_rq struct
 * @rq: request queue data structure
 *
 * This function should only be called if the rvt_mmap_info()
 * has not succeeded.
 */
static inline void rvt_free_rq(struct rvt_rq *rq)
{
	kvfree(rq->kwq);
	rq->kwq = NULL;
	vfree(rq->wq);
	rq->wq = NULL;
}

struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
				     u64 v,
				     void (*cb)(struct rvt_qp *qp, u64 v));
int rvt_qp_iter_next(struct rvt_qp_iter *iter);
void rvt_qp_iter(struct rvt_dev_info *rdi,
		 u64 v,
		 void (*cb)(struct rvt_qp *qp, u64 v));
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey);

#endif          /* DEF_RDMAVT_INCQP_H */