#ifndef DEF_RDMAVT_INCQP_H
#define DEF_RDMAVT_INCQP_H

/*
 * Copyright(c) 2016 - 2019 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdmavt_cq.h>
#include <rdma/rvt-abi.h>

/*
 * Atomic bit definitions for r_aflags.
 */
#define RVT_R_WRID_VALID	0
#define RVT_R_REWIND_SGE	1

/*
 * Bit definitions for r_flags.
 */
#define RVT_R_REUSE_SGE	0x01
#define RVT_R_RDMAR_SEQ	0x02
#define RVT_R_RSP_NAK	0x04
#define RVT_R_RSP_SEND	0x08
#define RVT_R_COMM_EST	0x10
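/*
 * Example (illustrative sketch, not from this header): r_aflags is
 * manipulated with the atomic bitops so the receive and completion paths
 * can hand off the current WQE ID without holding a lock:
 *
 *	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
 *		wc.wr_id = qp->r_wr_id;
 */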
/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                    before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                    before processing the next SWQE
 * RVT_S_WAIT_RNR - waiting for RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  next send completion entry not via send DMA
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * RVT_S_ECN - a BECN was queued to the send engine
 * RVT_S_MAX_BIT_MASK - the max bit that can be used by rdmavt
 */
#define RVT_S_SIGNAL_REQ_WR	0x0001
#define RVT_S_BUSY		0x0002
#define RVT_S_TIMER		0x0004
#define RVT_S_RESP_PENDING	0x0008
#define RVT_S_ACK_PENDING	0x0010
#define RVT_S_WAIT_FENCE	0x0020
#define RVT_S_WAIT_RDMAR	0x0040
#define RVT_S_WAIT_RNR		0x0080
#define RVT_S_WAIT_SSN_CREDIT	0x0100
#define RVT_S_WAIT_DMA		0x0200
#define RVT_S_WAIT_PIO		0x0400
#define RVT_S_WAIT_TX		0x0800
#define RVT_S_WAIT_DMA_DESC	0x1000
#define RVT_S_WAIT_KMEM		0x2000
#define RVT_S_WAIT_PSN		0x4000
#define RVT_S_WAIT_ACK		0x8000
#define RVT_S_SEND_ONE		0x10000
#define RVT_S_UNLIMITED_CREDIT	0x20000
#define RVT_S_ECN		0x40000
#define RVT_S_MAX_BIT_MASK	0x800000

/*
 * Drivers should use s_flags starting with bit 31 down to the bit next to
 * RVT_S_MAX_BIT_MASK.
 */

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define RVT_S_ANY_WAIT_IO \
	(RVT_S_WAIT_PIO | RVT_S_WAIT_TX | \
	 RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
	RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
	RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
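/*
 * Example (illustrative sketch, not from this header): a driver's send
 * engine typically checks these masks under s_lock before doing any work;
 * a QP that is busy or waiting on an I/O resource is not scheduled:
 *
 *	lockdep_assert_held(&qp->s_lock);
 *	if (qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT_IO))
 *		return false;
 *	return true;
 */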
/* Number of bits to pay attention to in the opcode for checking qp type */
#define RVT_OPCODE_QP_MASK	0xE0

/* Flags for checking QP state (see ib_rvt_state_ops[]) */
#define RVT_POST_SEND_OK		0x01
#define RVT_POST_RECV_OK		0x02
#define RVT_PROCESS_RECV_OK		0x04
#define RVT_PROCESS_SEND_OK		0x08
#define RVT_PROCESS_NEXT_SEND_OK	0x10
#define RVT_FLUSH_SEND			0x20
#define RVT_FLUSH_RECV			0x40
#define RVT_PROCESS_OR_FLUSH_SEND \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
#define RVT_SEND_OR_FLUSH_OR_RECV_OK \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND | RVT_PROCESS_RECV_OK)

/*
 * Internal send flags
 */
#define RVT_SEND_RESERVE_USED		IB_SEND_RESERVED_START
#define RVT_SEND_COMPLETION_ONLY	(IB_SEND_RESERVED_START << 1)

/**
 * rvt_ud_wr - IB UD work plus AH cache
 * @wr: valid IB work request
 * @attr: pointer to an allocated AH attribute
 *
 * Special case the UD WR so we can keep track of the AH attributes.
 *
 * NOTE: This data structure is strictly ordered wr then attr, i.e. the attr
 * MUST come after wr.  The ib_ud_wr is sized and copied in rvt_post_one_wr.
 * The copy assumes that wr is first.
 */
struct rvt_ud_wr {
	struct ib_ud_wr wr;
	struct rdma_ah_attr *attr;
};

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {
	union {
		struct ib_send_wr wr;   /* don't use wr.sg_list */
		struct rvt_ud_wr ud_wr;
		struct ib_reg_wr reg_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};
	u32 psn;                /* first packet sequence number */
	u32 lpsn;               /* last packet sequence number */
	u32 ssn;                /* send sequence number */
	u32 length;             /* total length of data in sg_list */
	void *priv;             /* driver dependent field */
	struct rvt_sge sg_list[];
};

/**
 * struct rvt_krwq - kernel receive work request queue
 * @p_lock: lock to protect the producer side of the kernel buffer
 * @head: index of next entry to fill
 * @c_lock: lock to protect the consumer side of the kernel buffer
 * @tail: index of next entry to pull
 * @count: approximate count of total receive entries posted
 * @rvt_rwqe: struct of receive work request queue entry
 *
 * This structure holds the head pointer, tail pointer, and
 * receive work queue entries for kernel-mode users.
 */
struct rvt_krwq {
	spinlock_t p_lock;	/* protect producer */
	u32 head;               /* new work requests posted to the head */

	/* protect consumer */
	spinlock_t c_lock ____cacheline_aligned_in_smp;
	u32 tail;               /* receives pull requests from here. */
	u32 count;		/* approx count of receive entries posted */
	struct rvt_rwqe *curr_wq;
	struct rvt_rwqe wq[];
};

struct rvt_rq {
	struct rvt_rwq *wq;
	struct rvt_krwq *kwq;
	u32 size;               /* size of RWQE array */
	u8 max_sge;
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
};
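/*
 * Example (illustrative sketch, not the exact rdmavt code): the producer
 * and consumer sides of struct rvt_krwq are serialized independently, so
 * posting receives can run concurrently with consuming them.  A consumer
 * might look roughly like:
 *
 *	spin_lock_irqsave(&rq->kwq->c_lock, flags);
 *	if (rq->kwq->tail != rq->kwq->head) {
 *		wqe = rvt_get_rwqe_ptr(rq, rq->kwq->tail);
 *		if (++rq->kwq->tail >= rq->size)
 *			rq->kwq->tail = 0;
 *	}
 *	spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
 *
 * rvt_get_rwqe_ptr() is defined later in this header.
 */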
/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct rvt_ack_entry {
	struct rvt_sge rdma_sge;
	u64 atomic_data;
	u32 psn;
	u32 lpsn;
	u8 opcode;
	u8 sent;
	void *priv;
};

#define	RC_QP_SCALING_INTERVAL	5

#define RVT_OPERATION_PRIV        0x00000001
#define RVT_OPERATION_ATOMIC      0x00000002
#define RVT_OPERATION_ATOMIC_SGE  0x00000004
#define RVT_OPERATION_LOCAL       0x00000008
#define RVT_OPERATION_USE_RESERVE 0x00000010
#define RVT_OPERATION_IGN_RNR_CNT 0x00000020

#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)

/**
 * rvt_operation_params - op table entry
 * @length - the length to copy into the swqe entry
 * @qpt_support - a bit mask indicating QP type support
 * @flags - RVT_OPERATION flags (see above)
 *
 * This supports table-driven post send so that
 * different drivers can support different sets
 * of operations.
 */
struct rvt_operation_params {
	size_t length;
	u32 qpt_support;
	u32 flags;
};
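/*
 * Example (illustrative sketch): a driver builds a post-send table indexed
 * by ib_wr_opcode, one rvt_operation_params entry per opcode it supports.
 * An RC/UC RDMA WRITE entry might look like:
 *
 *	[IB_WR_RDMA_WRITE] = {
 *		.length = sizeof(struct ib_rdma_wr),
 *		.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
 *	},
 *
 * rvt_post_one_wr() copies .length bytes of the WR into the swqe and
 * rejects opcodes whose qpt_support mask excludes the QP's type.
 */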
/*
 * Common variables are protected by both r_rq.lock and s_lock in that order
 * which only happens in modify_qp() or changing the QP 'state'.
 */
struct rvt_qp {
	struct ib_qp ibqp;
	void *priv; /* Driver private data */
	/* read mostly fields above and below */
	struct rdma_ah_attr remote_ah_attr;
	struct rdma_ah_attr alt_ah_attr;
	struct rvt_qp __rcu *next;           /* link list for QPN hash table */
	struct rvt_swqe *s_wq;  /* send work queue */
	struct rvt_mmap_info *ip;

	unsigned long timeout_jiffies;  /* computed from timeout */

	int srate_mbps;		/* s_srate (below) converted to Mbit/s */
	pid_t pid;		/* pid for user mode QPs */
	u32 remote_qpn;
	u32 qkey;               /* QKEY for this QP (for UD or RD) */
	u32 s_size;             /* send work queue size */

	u16 pmtu;		/* decoded from path_mtu */
	u8 log_pmtu;		/* shift for pmtu */
	u8 state;               /* QP state */
	u8 allowed_ops;		/* high order bits of allowed opcodes */
	u8 qp_access_flags;
	u8 alt_timeout;         /* Alternate path timeout for this QP */
	u8 timeout;             /* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index;        /* PKEY index to use */
	u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
	u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;         /* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
	u8 s_max_sge;           /* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */
	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct rvt_ack_entry *s_ack_queue;
	struct rvt_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp;      /* used for APM */
	u32 r_psn;              /* expected rcv packet sequence number */
	unsigned long r_aflags;
	u64 r_wr_id;            /* ID for current receive WQE */
	u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
	u32 r_len;              /* total length of r_sge */
	u32 r_rcv_len;          /* receive data len processed */
	u32 r_msn;              /* message sequence number */

	u8 r_state;             /* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue;    /* index into s_ack_queue[] */
	u8 r_adefered;		/* deferred ack count */

	struct list_head rspwait;       /* link for waiting to respond */

	struct rvt_sge_state r_sge;     /* current receive data */
	struct rvt_rq r_rq;             /* receive work queue */

	/* post send line */
	spinlock_t s_hlock ____cacheline_aligned_in_smp;
	u32 s_head;             /* new entries added here */
	u32 s_next_psn;         /* PSN for next request */
	u32 s_avail;            /* number of entries avail */
	u32 s_ssn;              /* SSN of tail entry */
	atomic_t s_reserved_used; /* reserved entries in use */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	u32 s_flags;
	struct rvt_sge_state *s_cur_sge;
	struct rvt_swqe *s_wqe;
	struct rvt_sge_state s_sge;     /* current send request data */
	struct rvt_mregion *s_rdma_mr;
	u32 s_len;              /* total length of s_sge */
	u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
	u32 s_last_psn;         /* last response PSN processed */
	u32 s_sending_psn;      /* lowest PSN that is being sent */
	u32 s_sending_hpsn;     /* highest PSN that is being sent */
	u32 s_psn;              /* current packet sequence number */
	u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
	u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
	u32 s_tail;             /* next entry to process */
	u32 s_cur;              /* current work queue entry */
	u32 s_acked;            /* last un-ACK'ed entry */
	u32 s_last;             /* last completed entry */
	u32 s_lsn;              /* limit sequence number (credit) */
	u32 s_ahgpsn;           /* set to the psn in the copy of the header */
	u16 s_cur_size;         /* size of send packet in bytes */
	u16 s_rdma_ack_cnt;
	u8 s_hdrwords;		/* size of s_hdr in 32 bit words */
	s8 s_ahgidx;
	u8 s_state;             /* opcode of last packet sent */
	u8 s_ack_state;         /* opcode of packet to ACK */
	u8 s_nak_state;         /* non-zero if NAK is pending */
	u8 r_nak_state;         /* non-zero if NAK is pending */
	u8 s_retry;             /* requester retry counter */
	u8 s_rnr_retry;         /* requester RNR retry counter */
	u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;    /* index into s_ack_queue[] */
	u8 s_acked_ack_queue;   /* index into s_ack_queue[] */

	struct rvt_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;
	struct hrtimer s_rnr_timer;

	atomic_t local_ops_pending; /* number of fast_reg/local_inv reqs */

	/*
	 * This sge list MUST be last. Do not add anything below here.
	 */
	struct rvt_sge r_sg_list[] /* verified SGEs */
		____cacheline_aligned_in_smp;
};

struct rvt_srq {
	struct ib_srq ibsrq;
	struct rvt_rq rq;
	struct rvt_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct rvt_srq, ibsrq);
}

static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct rvt_qp, ibqp);
}

#define RVT_QPN_MAX		BIT(24)
#define RVT_QPNMAP_ENTRIES	(RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE	(PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK	(RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK		IB_QPN_MASK
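/*
 * Example (illustrative sketch): with the definitions above, a QPN selects
 * a (page, bit) pair in the bitmap table defined below; the QPN allocator
 * uses arithmetic along these lines:
 *
 *	map = &qpn_table->map[qpn / RVT_BITS_PER_PAGE];
 *	offset = qpn & RVT_BITS_PER_PAGE_MASK;
 *	in_use = test_bit(offset, map->page);
 */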
/*
 * QPN-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct rvt_qpn_map {
	void *page;
};

struct rvt_qpn_table {
	spinlock_t lock; /* protect changes to the qp table */
	unsigned flags;         /* flags for QP0/1 allocated for each port */
	u32 last;               /* last QP number allocated */
	u32 nmaps;              /* size of the map table */
	u16 limit;
	u8  incr;
	/* bit map of free QP numbers other than 0/1 */
	struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};

struct rvt_qp_ibdev {
	u32 qp_table_size;
	u32 qp_table_bits;
	struct rvt_qp __rcu **qp_table;
	spinlock_t qpt_lock; /* qptable lock */
	struct rvt_qpn_table qpn_table;
};

/*
 * There is one struct rvt_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct rvt_mcast_qp.
 */
struct rvt_mcast_qp {
	struct list_head list;
	struct rvt_qp *qp;
};

struct rvt_mcast_addr {
	union ib_gid mgid;
	u16 lid;
};

struct rvt_mcast {
	struct rb_node rb_node;
	struct rvt_mcast_addr mcast_addr;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/*
 * Since struct rvt_swqe is not a fixed size, we can't simply index into
 * struct rvt_qp.s_wq.  This function does the array index computation.
 */
static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
						unsigned n)
{
	return (struct rvt_swqe *)((char *)qp->s_wq +
				   (sizeof(struct rvt_swqe) +
				    qp->s_max_sge *
				    sizeof(struct rvt_sge)) * n);
}

/*
 * Since struct rvt_rwqe is not a fixed size, we can't simply index into
 * struct rvt_rwq.wq.  This function does the array index computation.
 */
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
	return (struct rvt_rwqe *)
		((char *)rq->kwq->curr_wq +
		 (sizeof(struct rvt_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}

/**
 * rvt_is_user_qp - return if this is user mode QP
 * @qp - the target QP
 */
static inline bool rvt_is_user_qp(struct rvt_qp *qp)
{
	return !!qp->pid;
}

/**
 * rvt_get_qp - get a QP reference
 * @qp - the QP to hold
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{
	atomic_inc(&qp->refcount);
}

/**
 * rvt_put_qp - release a QP reference
 * @qp - the QP to release
 */
static inline void rvt_put_qp(struct rvt_qp *qp)
{
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

/**
 * rvt_put_swqe - drop mr refs held by swqe
 * @wqe - the send wqe
 *
 * This drops any mr references held by the swqe
 */
static inline void rvt_put_swqe(struct rvt_swqe *wqe)
{
	int i;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		rvt_put_mr(sge->mr);
	}
}

/**
 * rvt_qp_wqe_reserve - reserve operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This routine is used in post send to record
 * the use of a wqe-relative reserved operation.
 */
static inline void rvt_qp_wqe_reserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	atomic_inc(&qp->s_reserved_used);
}
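/*
 * Example (illustrative sketch): because each swqe carries s_max_sge
 * trailing struct rvt_sge entries, ring slots must be addressed with
 * rvt_get_swqe_ptr() above rather than plain pointer arithmetic on s_wq,
 * e.g. when picking up the oldest outstanding request:
 *
 *	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
 */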
/**
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This decrements the reserve use count.
 *
 * This call MUST precede the change to
 * s_last to ensure that post send sees a stable
 * s_avail.
 *
 * An smp_mb__after_atomic() is used to ensure
 * the compiler does not juggle the order of the s_last
 * ring index and the decrementing of s_reserved_used.
 */
static inline void rvt_qp_wqe_unreserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
		atomic_dec(&qp->s_reserved_used);
		/* ensure no compiler re-order up to s_last change */
		smp_mb__after_atomic();
	}
}

extern const enum ib_wc_opcode ib_rvt_wc_opcode[];

/*
 * Compare the lower 24 bits of the msn values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int rvt_cmp_msn(u32 a, u32 b)
{
	return (((int)a) - ((int)b)) << 8;
}

/**
 * rvt_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 rvt_compute_aeth(struct rvt_qp *qp);

/**
 * rvt_get_credit - flush the send work queue of a QP
 * @qp: the qp whose send work queue to flush
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void rvt_get_credit(struct rvt_qp *qp, u32 aeth);

/**
 * rvt_restart_sge - rewind the sge state for a wqe
 * @ss: the sge state pointer
 * @wqe: the wqe to rewind
 * @len: the data length from the start of the wqe in bytes
 *
 * Returns the remaining data length.
 */
u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);

/**
 * rvt_div_round_up_mtu - round up divide
 * @qp - the qp pair
 * @len - the length
 *
 * Perform a shift based mtu round up divide
 */
static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
{
	return (len + qp->pmtu - 1) >> qp->log_pmtu;
}

/**
 * rvt_div_mtu - divide by mtu
 * @qp - the qp pair
 * @len - the length
 *
 * Perform a shift based mtu divide
 */
static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
{
	return len >> qp->log_pmtu;
}

/**
 * rvt_timeout_to_jiffies - Convert a ULP timeout input into jiffies
 * @timeout - timeout input(0 - 31).
 *
 * Return a timeout value in jiffies.
 */
static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
{
	if (timeout > 31)
		timeout = 31;

	return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
}
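/*
 * Example (illustrative sketch): the shift-based helpers above are used to
 * size a request in packets, e.g. the number of packets needed to carry a
 * wqe's payload at the current path MTU:
 *
 *	u32 npkts = rvt_div_round_up_mtu(qp, wqe->length);
 */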
/**
 * rvt_lookup_qpn - return the QP with the given QPN
 * @rdi: the rvt device info
 * @rvp: the ibport data
 * @qpn: the QP number to look up
 *
 * The caller must hold the rcu_read_lock(), and keep the lock until
 * the returned qp is no longer in use.
 */
static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
					    struct rvt_ibport *rvp,
					    u32 qpn) __must_hold(RCU)
{
	struct rvt_qp *qp = NULL;

	if (unlikely(qpn <= 1)) {
		qp = rcu_dereference(rvp->qp[qpn]);
	} else {
		u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);

		for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn)
				break;
	}
	return qp;
}

/**
 * rvt_mod_retry_timer_ext - mod a retry timer
 * @qp - the QP
 * @shift - timeout shift to wait for multiple packets
 *
 * Modify a potentially already running retry timer.
 */
static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
		  (qp->timeout_jiffies << shift));
}

static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
{
	rvt_mod_retry_timer_ext(qp, 0);
}

/**
 * rvt_put_qp_swqe - drop refs held by swqe
 * @qp: the send qp
 * @wqe: the send wqe
 *
 * This drops any references held by the swqe
 */
static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	rvt_put_swqe(wqe);
	if (qp->allowed_ops == IB_OPCODE_UD)
		rdma_destroy_ah_attr(wqe->ud_wr.attr);
}

/**
 * rvt_qp_swqe_incr - increment ring index
 * @qp: the qp
 * @val: the starting value
 *
 * Return: the new value wrapping as appropriate
 */
static inline u32
rvt_qp_swqe_incr(struct rvt_qp *qp, u32 val)
{
	if (++val >= qp->s_size)
		val = 0;
	return val;
}

int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);

/**
 * rvt_recv_cq - add a new entry to completion queue
 *		 by receive queue
 * @qp: receive queue
 * @wc: work completion entry to add
 * @solicited: true if @entry is solicited
 *
 * This is a wrapper function for the rvt_cq_enter() call by the
 * receive queue.  If rvt_cq_enter() returns false, the cq is full
 * and the qp is put into error state.
 */
static inline void rvt_recv_cq(struct rvt_qp *qp, struct ib_wc *wc,
			       bool solicited)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.recv_cq);

	if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
		rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
}
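/*
 * Example (illustrative sketch): a caller that needs the QP beyond the
 * RCU read-side critical section takes a reference before dropping the
 * lock and releases it with rvt_put_qp() when done:
 *
 *	rcu_read_lock();
 *	qp = rvt_lookup_qpn(rdi, rvp, qpn);
 *	if (qp)
 *		rvt_get_qp(qp);
 *	rcu_read_unlock();
 *	...
 *	rvt_put_qp(qp);
 */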
/**
 * rvt_send_cq - add a new entry to completion queue
 *		 by send queue
 * @qp: send queue
 * @wc: work completion entry to add
 * @solicited: true if @entry is solicited
 *
 * This is a wrapper function for the rvt_cq_enter() call by the
 * send queue.  If rvt_cq_enter() returns false, the cq is full
 * and the qp is put into error state.
 */
static inline void rvt_send_cq(struct rvt_qp *qp, struct ib_wc *wc,
			       bool solicited)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);

	if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
		rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
}

/**
 * rvt_qp_complete_swqe - insert send completion
 * @qp - the qp
 * @wqe - the send wqe
 * @opcode - wc operation (driver dependent)
 * @status - completion status
 *
 * Update the s_last information, and then insert a send
 * completion into the completion
 * queue if the qp indicates it should be done.
 *
 * See IBTA 10.7.3.1 for info on completion
 * control.
 *
 * Return: new last
 */
static inline u32
rvt_qp_complete_swqe(struct rvt_qp *qp,
		     struct rvt_swqe *wqe,
		     enum ib_wc_opcode opcode,
		     enum ib_wc_status status)
{
	bool need_completion;
	u64 wr_id;
	u32 byte_len, last;
	int flags = wqe->wr.send_flags;

	rvt_put_qp_swqe(qp, wqe);

	need_completion =
		!(flags & RVT_SEND_RESERVE_USED) &&
		(!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
		(flags & IB_SEND_SIGNALED) ||
		status != IB_WC_SUCCESS);
	if (need_completion) {
		wr_id = wqe->wr.wr_id;
		byte_len = wqe->length;
		/* above fields required before writing s_last */
	}
	last = rvt_qp_swqe_incr(qp, qp->s_last);
	/* see rvt_qp_is_avail() */
	smp_store_release(&qp->s_last, last);
	if (need_completion) {
		struct ib_wc w = {
			.wr_id = wr_id,
			.status = status,
			.opcode = opcode,
			.qp = &qp->ibqp,
			.byte_len = byte_len,
		};
		rvt_send_cq(qp, &w, status != IB_WC_SUCCESS);
	}
	return last;
}

extern const int ib_rvt_state_ops[];

struct rvt_dev_info;
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
void rvt_comm_est(struct rvt_qp *qp);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
unsigned long rvt_rnr_tbl_to_usec(u32 index);
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift);
static inline void rvt_add_retry_timer(struct rvt_qp *qp)
{
	rvt_add_retry_timer_ext(qp, 0);
}

void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
		  void *data, u32 length,
		  bool release, bool copy_last);
void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status);
void rvt_ruc_loopback(struct rvt_qp *qp);

/**
 * struct rvt_qp_iter - the iterator for QPs
 * @qp - the current QP
 *
 * This structure defines the current iterator
 * state for sequenced access to all QPs relative
 * to an rvt_dev_info.
 */
struct rvt_qp_iter {
	struct rvt_qp *qp;
	/* private: backpointer */
	struct rvt_dev_info *rdi;
	/* private: callback routine */
	void (*cb)(struct rvt_qp *qp, u64 v);
	/* private: for arg to callback routine */
	u64 v;
	/* private: number of SMI,GSI QPs for device */
	int specials;
	/* private: current iterator index */
	int n;
};
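/*
 * Example (illustrative sketch; qp_dump_cb is a hypothetical callback):
 * rvt_qp_iter(), declared near the end of this header, invokes the
 * callback once for every QP on the device:
 *
 *	static void qp_dump_cb(struct rvt_qp *qp, u64 v)
 *	{
 *		pr_info("qpn 0x%x state %u\n", qp->ibqp.qp_num, qp->state);
 *	}
 *
 *	rvt_qp_iter(rdi, 0, qp_dump_cb);
 */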
/**
 * ib_cq_tail - Return tail index of cq buffer
 * @send_cq - The cq for send
 *
 * This is called in qp_iter_print to get tail
 * of cq buffer.
 */
static inline u32 ib_cq_tail(struct ib_cq *send_cq)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);

	return ibcq_to_rvtcq(send_cq)->ip ?
	       RDMA_READ_UAPI_ATOMIC(cq->queue->tail) :
	       ibcq_to_rvtcq(send_cq)->kqueue->tail;
}

/**
 * ib_cq_head - Return head index of cq buffer
 * @send_cq - The cq for send
 *
 * This is called in qp_iter_print to get head
 * of cq buffer.
 */
static inline u32 ib_cq_head(struct ib_cq *send_cq)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);

	return ibcq_to_rvtcq(send_cq)->ip ?
	       RDMA_READ_UAPI_ATOMIC(cq->queue->head) :
	       ibcq_to_rvtcq(send_cq)->kqueue->head;
}

/**
 * rvt_free_rq - free memory allocated for rvt_rq struct
 * @rq: request queue data structure
 *
 * This function should only be called if rvt_mmap_info()
 * has not succeeded.
 */
static inline void rvt_free_rq(struct rvt_rq *rq)
{
	kvfree(rq->kwq);
	rq->kwq = NULL;
	vfree(rq->wq);
	rq->wq = NULL;
}

struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
				     u64 v,
				     void (*cb)(struct rvt_qp *qp, u64 v));
int rvt_qp_iter_next(struct rvt_qp_iter *iter);
void rvt_qp_iter(struct rvt_dev_info *rdi,
		 u64 v,
		 void (*cb)(struct rvt_qp *qp, u64 v));
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey);
#endif          /* DEF_RDMAVT_INCQP_H */