/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#ifndef _SIW_H
#define _SIW_H

#include <rdma/ib_verbs.h>
#include <rdma/restrack.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>

#include <rdma/siw-abi.h>
#include "iwarp.h"

#define SIW_VENDOR_ID 0x626d74 /* ascii 'bmt' for now */
#define SIW_VENDORT_PART_ID 0
#define SIW_MAX_QP (1024 * 100)
#define SIW_MAX_QP_WR (1024 * 32)
#define SIW_MAX_ORD_QP 128
#define SIW_MAX_IRD_QP 128
#define SIW_MAX_SGE_PBL 256 /* max num sge's for PBL */
#define SIW_MAX_SGE_RD 1 /* iWARP limitation; could be relaxed */
#define SIW_MAX_CQ (1024 * 100)
#define SIW_MAX_CQE (SIW_MAX_QP_WR * 100)
#define SIW_MAX_MR (SIW_MAX_QP * 10)
#define SIW_MAX_PD SIW_MAX_QP
#define SIW_MAX_MW 0 /* to be set if MW's are supported */
#define SIW_MAX_FMR SIW_MAX_MR
#define SIW_MAX_SRQ SIW_MAX_QP
#define SIW_MAX_SRQ_WR (SIW_MAX_QP_WR * 10)
#define SIW_MAX_CONTEXT SIW_MAX_PD

/* Min number of bytes for using zero copy transmit */
#define SENDPAGE_THRESH PAGE_SIZE

/* Maximum number of frames which can be sent in one SQ processing */
#define SQ_USER_MAXBURST 100

/* Maximum number of consecutive IRQ elements which get served
 * if SQ has pending work. Prevents starving local SQ processing
 * by serving peer Read Requests.
 */
#define SIW_IRQ_MAXBURST_SQ_ACTIVE 4

struct siw_dev_cap {
	int max_qp;
	int max_qp_wr;
	int max_ord; /* max. outbound read queue depth */
	int max_ird; /* max. inbound read queue depth */
	int max_sge;
	int max_sge_rd;
	int max_cq;
	int max_cqe;
	int max_mr;
	int max_pd;
	int max_mw;
	int max_fmr;
	int max_srq;
	int max_srq_wr;
	int max_srq_sge;
};

struct siw_pd {
	struct ib_pd base_pd;
};

struct siw_device {
	struct ib_device base_dev;
	struct device_dma_parameters dma_parms;
	struct net_device *netdev;
	struct siw_dev_cap attrs;

	u32 vendor_part_id;
	int numa_node;

	/* physical port state (only one port per device) */
	enum ib_port_state state;

	spinlock_t lock;

	struct xarray qp_xa;
	struct xarray mem_xa;

	struct list_head cep_list;
	struct list_head qp_list;

	/* active objects statistics to enforce limits */
	atomic_t num_qp;
	atomic_t num_cq;
	atomic_t num_pd;
	atomic_t num_mr;
	atomic_t num_srq;
	atomic_t num_ctx;

	struct work_struct netdev_down;
};

struct siw_ucontext {
	struct ib_ucontext base_ucontext;
	struct siw_device *sdev;
};

/*
 * The RDMA core does not define LOCAL_READ access, which is always
 * enabled implicitly.
 */
#define IWARP_ACCESS_MASK \
	(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | \
	 IB_ACCESS_REMOTE_READ)
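
/*
 * A minimal sketch, not part of the driver (siw_limit_access() is a
 * hypothetical name): a caller could reduce requested rights to what
 * iWARP can express by masking with IWARP_ACCESS_MASK, relying on
 * LOCAL_READ being implied as noted above.
 */
static inline int siw_limit_access(int ib_access_flags)
{
	/* drop any access bits iWARP cannot express */
	return ib_access_flags & IWARP_ACCESS_MASK;
}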

/*
 * siw presentation of user memory registered as source
 * or target of RDMA operations.
 */

struct siw_page_chunk {
	struct page **plist;
};

struct siw_umem {
	struct siw_page_chunk *page_chunk;
	int num_pages;
	bool writable;
	u64 fp_addr; /* First page base address */
	struct mm_struct *owning_mm;
};

struct siw_pble {
	dma_addr_t addr; /* Address of assigned buffer */
	unsigned int size; /* Size of this entry */
	unsigned long pbl_off; /* Total offset from start of PBL */
};

struct siw_pbl {
	unsigned int num_buf;
	unsigned int max_buf;
	struct siw_pble pbe[1];
};

/*
 * Generic memory representation for registered siw memory.
 * Memory lookup is always via the upper 24 bits of the STag
 * (the STag index).
 */
struct siw_mem {
	struct siw_device *sdev;
	struct kref ref;
	u64 va; /* VA of memory */
	u64 len; /* length of the memory buffer in bytes */
	u32 stag; /* iWARP memory access steering tag */
	u8 stag_valid; /* VALID or INVALID */
	u8 is_pbl; /* PBL or user space mem */
	u8 is_mw; /* Memory Region or Memory Window */
	enum ib_access_flags perms; /* local/remote READ & WRITE */
	union {
		struct siw_umem *umem;
		struct siw_pbl *pbl;
		void *mem_obj;
	};
	struct ib_pd *pd;
};

struct siw_mr {
	struct ib_mr base_mr;
	struct siw_mem *mem;
	struct rcu_head rcu;
};

/*
 * Error codes for local or remote
 * access to registered memory
 */
enum siw_access_state {
	E_ACCESS_OK,
	E_STAG_INVALID,
	E_BASE_BOUNDS,
	E_ACCESS_PERM,
	E_PD_MISMATCH
};

enum siw_wr_state {
	SIW_WR_IDLE,
	SIW_WR_QUEUED, /* processing has not started yet */
	SIW_WR_INPROGRESS /* initiated processing of the WR */
};

/* The WQE currently being processed (RX or TX) */
struct siw_wqe {
	/* Copy of the application's SQE or RQE */
	union {
		struct siw_sqe sqe;
		struct siw_rqe rqe;
	};
	struct siw_mem *mem[SIW_MAX_SGE]; /* per-SGE resolved mem */
	enum siw_wr_state wr_status;
	enum siw_wc_status wc_status;
	u32 bytes; /* total bytes to process */
	u32 processed; /* bytes processed */
};

struct siw_cq {
	struct ib_cq base_cq;
	spinlock_t lock;
	struct siw_cq_ctrl *notify;
	struct siw_cqe *queue;
	u32 cq_put;
	u32 cq_get;
	u32 num_cqe;
	struct rdma_user_mmap_entry *cq_entry; /* mmap info for CQE array */
	u32 id; /* For debugging only */
};

enum siw_qp_state {
	SIW_QP_STATE_IDLE,
	SIW_QP_STATE_RTR,
	SIW_QP_STATE_RTS,
	SIW_QP_STATE_CLOSING,
	SIW_QP_STATE_TERMINATE,
	SIW_QP_STATE_ERROR,
	SIW_QP_STATE_COUNT
};

enum siw_qp_flags {
	SIW_RDMA_BIND_ENABLED = (1 << 0),
	SIW_RDMA_WRITE_ENABLED = (1 << 1),
	SIW_RDMA_READ_ENABLED = (1 << 2),
	SIW_SIGNAL_ALL_WR = (1 << 3),
	SIW_MPA_CRC = (1 << 4),
	SIW_QP_IN_DESTROY = (1 << 5)
};

enum siw_qp_attr_mask {
	SIW_QP_ATTR_STATE = (1 << 0),
	SIW_QP_ATTR_ACCESS_FLAGS = (1 << 1),
	SIW_QP_ATTR_LLP_HANDLE = (1 << 2),
	SIW_QP_ATTR_ORD = (1 << 3),
	SIW_QP_ATTR_IRD = (1 << 4),
	SIW_QP_ATTR_SQ_SIZE = (1 << 5),
	SIW_QP_ATTR_RQ_SIZE = (1 << 6),
	SIW_QP_ATTR_MPA = (1 << 7)
};
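
/*
 * Usage sketch (illustrative only): attribute changes are applied via
 * siw_qp_modify(), declared further below, with an OR'ed mask naming
 * the fields of struct siw_qp_attrs which are actually set, e.g.
 *
 *	struct siw_qp_attrs attrs = { .state = SIW_QP_STATE_RTS };
 *
 *	down_write(&qp->state_lock);
 *	rv = siw_qp_modify(qp, &attrs, SIW_QP_ATTR_STATE);
 *	up_write(&qp->state_lock);
 */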

struct siw_srq {
	struct ib_srq base_srq;
	spinlock_t lock;
	u32 max_sge;
	u32 limit; /* low watermark for async event */
	struct siw_rqe *recvq;
	u32 rq_put;
	u32 rq_get;
	u32 num_rqe; /* max # of wqe's allowed */
	struct rdma_user_mmap_entry *srq_entry; /* mmap info for SRQ array */
	bool armed:1; /* inform user if limit hit */
	bool is_kernel_res:1; /* true if kernel client */
};

struct siw_qp_attrs {
	enum siw_qp_state state;
	u32 sq_size;
	u32 rq_size;
	u32 orq_size;
	u32 irq_size;
	u32 sq_max_sges;
	u32 rq_max_sges;
	enum siw_qp_flags flags;

	struct socket *sk;
};

enum siw_tx_ctx {
	SIW_SEND_HDR, /* start or continue sending HDR */
	SIW_SEND_DATA, /* start or continue sending DDP payload */
	SIW_SEND_TRAILER, /* start or continue sending TRAILER */
	SIW_SEND_SHORT_FPDU /* send whole FPDU hdr|data|trailer at once */
};

enum siw_rx_state {
	SIW_GET_HDR, /* await new hdr or within hdr */
	SIW_GET_DATA_START, /* start of inbound DDP payload */
	SIW_GET_DATA_MORE, /* continuation of (misaligned) DDP payload */
	SIW_GET_TRAILER /* await new trailer or within trailer */
};

struct siw_rx_stream {
	struct sk_buff *skb;
	int skb_new; /* pending unread bytes in skb */
	int skb_offset; /* offset in skb */
	int skb_copied; /* processed bytes in skb */

	union iwarp_hdr hdr;
	struct mpa_trailer trailer;

	enum siw_rx_state state;

	/*
	 * For each FPDU, the main RX loop runs through 3 stages:
	 * receiving the protocol headers, placing the DDP payload and
	 * receiving the trailer information (CRC + possibly padding).
	 * The next two variables keep state on the receive status of
	 * the current FPDU part (hdr, data, trailer).
	 */
	int fpdu_part_rcvd; /* bytes in pkt part copied */
	int fpdu_part_rem; /* bytes in pkt part not seen */

	/*
	 * Next expected DDP MSN for each QN +
	 * expected steering tag +
	 * expected DDP tagged offset (all in host byte order)
	 */
	u32 ddp_msn[RDMAP_UNTAGGED_QN_COUNT];
	u32 ddp_stag;
	u64 ddp_to;
	u32 inval_stag; /* STag to be invalidated */

	struct shash_desc *mpa_crc_hd;
	u8 rx_suspend : 1;
	u8 pad : 2; /* # of pad bytes expected */
	u8 rdmap_op : 4; /* opcode of current frame */
};

struct siw_rx_fpdu {
	/*
	 * Local destination memory of inbound RDMA operation.
	 * Valid, according to wqe->wr_status
	 */
	struct siw_wqe wqe_active;

	unsigned int pbl_idx; /* Index into current PBL */
	unsigned int sge_idx; /* current sge in rx */
	unsigned int sge_off; /* already rcvd in curr. sge */

	char first_ddp_seg; /* this is the first DDP seg */
	char more_ddp_segs; /* more DDP segs expected */
	u8 prev_rdmap_op : 4; /* opcode of prev frame */
};
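
/*
 * Note (illustrative): tagged frames (RDMA Write, Read Response) and
 * untagged frames (Send, Read Request, Terminate) may arrive
 * interleaved on the same TCP stream, so per-class reassembly state
 * is kept in separate siw_rx_fpdu contexts; set_rx_fpdu_context(),
 * defined below, switches qp->rx_fpdu accordingly for each new header.
 */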

/*
 * Shorthands for short packets w/o payload
 * to be transmitted more efficiently.
 */
struct siw_send_pkt {
	struct iwarp_send send;
	__be32 crc;
};

struct siw_write_pkt {
	struct iwarp_rdma_write write;
	__be32 crc;
};

struct siw_rreq_pkt {
	struct iwarp_rdma_rreq rreq;
	__be32 crc;
};

struct siw_rresp_pkt {
	struct iwarp_rdma_rresp rresp;
	__be32 crc;
};

struct siw_iwarp_tx {
	union {
		union iwarp_hdr hdr;

		/* Generic part of FPDU header */
		struct iwarp_ctrl ctrl;
		struct iwarp_ctrl_untagged c_untagged;
		struct iwarp_ctrl_tagged c_tagged;

		/* FPDU headers */
		struct iwarp_rdma_write rwrite;
		struct iwarp_rdma_rreq rreq;
		struct iwarp_rdma_rresp rresp;
		struct iwarp_terminate terminate;
		struct iwarp_send send;
		struct iwarp_send_inv send_inv;

		/* complete short FPDUs */
		struct siw_send_pkt send_pkt;
		struct siw_write_pkt write_pkt;
		struct siw_rreq_pkt rreq_pkt;
		struct siw_rresp_pkt rresp_pkt;
	} pkt;

	struct mpa_trailer trailer;
	/* DDP MSN for untagged messages */
	u32 ddp_msn[RDMAP_UNTAGGED_QN_COUNT];

	enum siw_tx_ctx state;
	u16 ctrl_len; /* ddp+rdmap hdr */
	u16 ctrl_sent;
	int burst;
	int bytes_unsent; /* ddp payload bytes */

	struct shash_desc *mpa_crc_hd;

	u8 do_crc : 1; /* do crc for segment */
	u8 use_sendpage : 1; /* send w/o copy */
	u8 tx_suspend : 1; /* stop sending DDP segs. */
	u8 pad : 2; /* # pad in current fpdu */
	u8 orq_fence : 1; /* ORQ full or Send fenced */
	u8 in_syscall : 1; /* TX out of user context */
	u8 zcopy_tx : 1; /* Use TCP_SENDPAGE if possible */
	u8 gso_seg_limit; /* Maximum segments for GSO, 0 = unbound */

	u16 fpdu_len; /* len of FPDU to tx */
	unsigned int tcp_seglen; /* remaining tcp seg space */

	struct siw_wqe wqe_active;

	int pbl_idx; /* Index into current PBL */
	int sge_idx; /* current sge in tx */
	u32 sge_off; /* already sent in curr. sge */
};
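
/*
 * A sketch of the FPDU framing handled by the TX context above
 * (siw_fpdu_wire_len() is a hypothetical helper, not driver code; it
 * assumes hdr_len counts from the start of the FPDU, i.e. includes
 * the 2-byte MPA length field): the payload is followed by 0-3 MPA
 * pad bytes so the trailing 4-byte CRC stays 32-bit aligned. For the
 * short packets above, header and CRC are contiguous, so the whole
 * FPDU can go out at once in SIW_SEND_SHORT_FPDU state.
 */
static inline u16 siw_fpdu_wire_len(u16 hdr_len, u16 payload)
{
	u16 pad = -(hdr_len + payload) & 0x3;

	return hdr_len + payload + pad + sizeof(__be32) /* CRC */;
}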

struct siw_qp {
	struct ib_qp base_qp;
	struct siw_device *sdev;
	struct kref ref;
	struct list_head devq;
	int tx_cpu;
	struct siw_qp_attrs attrs;

	struct siw_cep *cep;
	struct rw_semaphore state_lock;

	struct ib_pd *pd;
	struct siw_cq *scq;
	struct siw_cq *rcq;
	struct siw_srq *srq;

	struct siw_iwarp_tx tx_ctx; /* Transmit context */
	spinlock_t sq_lock;
	struct siw_sqe *sendq; /* send queue element array */
	uint32_t sq_get; /* consumer index into sq array */
	uint32_t sq_put; /* kernel prod. index into sq array */
	struct llist_node tx_list;

	struct siw_sqe *orq; /* outbound read queue element array */
	spinlock_t orq_lock;
	uint32_t orq_get; /* consumer index into orq array */
	uint32_t orq_put; /* shared producer index for ORQ */

	struct siw_rx_stream rx_stream;
	struct siw_rx_fpdu *rx_fpdu;
	struct siw_rx_fpdu rx_tagged;
	struct siw_rx_fpdu rx_untagged;
	spinlock_t rq_lock;
	struct siw_rqe *recvq; /* recv queue element array */
	uint32_t rq_get; /* consumer index into rq array */
	uint32_t rq_put; /* kernel prod. index into rq array */

	struct siw_sqe *irq; /* inbound read queue element array */
	uint32_t irq_get; /* consumer index into irq array */
	uint32_t irq_put; /* producer index into irq array */
	int irq_burst;

	struct { /* information to be carried in TERMINATE pkt, if valid */
		u8 valid;
		u8 in_tx;
		u8 layer : 4, etype : 4;
		u8 ecode;
	} term_info;
	struct rdma_user_mmap_entry *sq_entry; /* mmap info for SQE array */
	struct rdma_user_mmap_entry *rq_entry; /* mmap info for RQE array */
	struct rcu_head rcu;
};

/* helper macros */
#define rx_qp(rx) container_of(rx, struct siw_qp, rx_stream)
#define tx_qp(tx) container_of(tx, struct siw_qp, tx_ctx)
#define tx_wqe(qp) (&(qp)->tx_ctx.wqe_active)
#define rx_wqe(rctx) (&(rctx)->wqe_active)
#define rx_mem(rctx) ((rctx)->wqe_active.mem[0])
#define tx_type(wqe) ((wqe)->sqe.opcode)
#define rx_type(wqe) ((wqe)->rqe.opcode)
#define tx_flags(wqe) ((wqe)->sqe.flags)

struct iwarp_msg_info {
	int hdr_len;
	struct iwarp_ctrl ctrl;
	int (*rx_data)(struct siw_qp *qp);
};

struct siw_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	void *address;
};

/* Global siw parameters. Currently set in siw_main.c */
extern const bool zcopy_tx;
extern const bool try_gso;
extern const bool loopback_enabled;
extern const bool mpa_crc_required;
extern const bool mpa_crc_strict;
extern const bool siw_tcp_nagle;
extern u_char mpa_version;
extern const bool peer_to_peer;
extern struct task_struct *siw_tx_thread[];

extern struct crypto_shash *siw_crypto_shash;
extern struct iwarp_msg_info iwarp_pktinfo[RDMAP_TERMINATE + 1];

/* QP general functions */
int siw_qp_modify(struct siw_qp *qp, struct siw_qp_attrs *attr,
		  enum siw_qp_attr_mask mask);
int siw_qp_mpa_rts(struct siw_qp *qp, enum mpa_v2_ctrl ctrl);
void siw_qp_llp_close(struct siw_qp *qp);
void siw_qp_cm_drop(struct siw_qp *qp, int schedule);
void siw_send_terminate(struct siw_qp *qp);

void siw_qp_get_ref(struct ib_qp *qp);
void siw_qp_put_ref(struct ib_qp *qp);
int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp);
void siw_free_qp(struct kref *ref);

void siw_init_terminate(struct siw_qp *qp, enum term_elayer layer,
			u8 etype, u8 ecode, int in_tx);
enum ddp_ecode siw_tagged_error(enum siw_access_state state);
enum rdmap_ecode siw_rdmap_error(enum siw_access_state state);

void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe);
int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
		     enum siw_wc_status status);
int siw_rqe_complete(struct siw_qp *qp, struct siw_rqe *rqe, u32 bytes,
		     u32 inval_stag, enum siw_wc_status status);
void siw_qp_llp_data_ready(struct sock *sk);
void siw_qp_llp_write_space(struct sock *sk);

/* QP TX path functions */
int siw_run_sq(void *arg);
int siw_qp_sq_process(struct siw_qp *qp);
int siw_sq_start(struct siw_qp *qp);
int siw_activate_tx(struct siw_qp *qp);
void siw_stop_tx_thread(int nr_cpu);
int siw_get_tx_cpu(struct siw_device *sdev);
void siw_put_tx_cpu(int cpu);
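
/*
 * Usage sketch (illustrative only): QP TX processing is affine to one
 * of the siw_tx_thread[] kernel threads. A QP would typically pin a
 * CPU at creation time,
 *
 *	qp->tx_cpu = siw_get_tx_cpu(sdev);
 *
 * and return it via siw_put_tx_cpu(qp->tx_cpu) when destroyed.
 */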

/* QP RX path functions */
int siw_proc_send(struct siw_qp *qp);
int siw_proc_rreq(struct siw_qp *qp);
int siw_proc_rresp(struct siw_qp *qp);
int siw_proc_write(struct siw_qp *qp);
int siw_proc_terminate(struct siw_qp *qp);

int siw_tcp_rx_data(read_descriptor_t *rd_desc, struct sk_buff *skb,
		    unsigned int off, size_t len);

static inline void set_rx_fpdu_context(struct siw_qp *qp, u8 opcode)
{
	if (opcode == RDMAP_RDMA_WRITE || opcode == RDMAP_RDMA_READ_RESP)
		qp->rx_fpdu = &qp->rx_tagged;
	else
		qp->rx_fpdu = &qp->rx_untagged;

	qp->rx_stream.rdmap_op = opcode;
}

static inline struct siw_ucontext *to_siw_ctx(struct ib_ucontext *base_ctx)
{
	return container_of(base_ctx, struct siw_ucontext, base_ucontext);
}

static inline struct siw_qp *to_siw_qp(struct ib_qp *base_qp)
{
	return container_of(base_qp, struct siw_qp, base_qp);
}

static inline struct siw_cq *to_siw_cq(struct ib_cq *base_cq)
{
	return container_of(base_cq, struct siw_cq, base_cq);
}

static inline struct siw_srq *to_siw_srq(struct ib_srq *base_srq)
{
	return container_of(base_srq, struct siw_srq, base_srq);
}

static inline struct siw_device *to_siw_dev(struct ib_device *base_dev)
{
	return container_of(base_dev, struct siw_device, base_dev);
}

static inline struct siw_mr *to_siw_mr(struct ib_mr *base_mr)
{
	return container_of(base_mr, struct siw_mr, base_mr);
}

static inline struct siw_user_mmap_entry *
to_siw_mmap_entry(struct rdma_user_mmap_entry *rdma_mmap)
{
	return container_of(rdma_mmap, struct siw_user_mmap_entry, rdma_entry);
}

static inline struct siw_qp *siw_qp_id2obj(struct siw_device *sdev, int id)
{
	struct siw_qp *qp;

	rcu_read_lock();
	qp = xa_load(&sdev->qp_xa, id);
	if (likely(qp && kref_get_unless_zero(&qp->ref))) {
		rcu_read_unlock();
		return qp;
	}
	rcu_read_unlock();
	return NULL;
}

static inline u32 qp_id(struct siw_qp *qp)
{
	return qp->base_qp.qp_num;
}

static inline void siw_qp_get(struct siw_qp *qp)
{
	kref_get(&qp->ref);
}

static inline void siw_qp_put(struct siw_qp *qp)
{
	kref_put(&qp->ref, siw_free_qp);
}

static inline int siw_sq_empty(struct siw_qp *qp)
{
	struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];

	return READ_ONCE(sqe->flags) == 0;
}

static inline struct siw_sqe *sq_get_next(struct siw_qp *qp)
{
	struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];

	if (READ_ONCE(sqe->flags) & SIW_WQE_VALID)
		return sqe;

	return NULL;
}

static inline struct siw_sqe *orq_get_current(struct siw_qp *qp)
{
	return &qp->orq[qp->orq_get % qp->attrs.orq_size];
}

static inline struct siw_sqe *orq_get_tail(struct siw_qp *qp)
{
	return &qp->orq[qp->orq_put % qp->attrs.orq_size];
}

static inline struct siw_sqe *orq_get_free(struct siw_qp *qp)
{
	struct siw_sqe *orq_e = orq_get_tail(qp);

	if (orq_e && READ_ONCE(orq_e->flags) == 0)
		return orq_e;

	return NULL;
}
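
/*
 * Ownership sketch for the flat ORQ ring (illustrative only): a slot
 * whose flags word reads zero belongs to the producer; setting
 * SIW_WQE_VALID hands it over to the consumer, e.g.
 *
 *	struct siw_sqe *orq_e = orq_get_free(qp);
 *
 *	if (orq_e) {
 *		...fill the entry, then publish and advance...
 *		WRITE_ONCE(orq_e->flags, SIW_WQE_VALID);
 *		qp->orq_put++;
 *	}
 */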

static inline int siw_orq_empty(struct siw_qp *qp)
{
	return qp->orq[qp->orq_get % qp->attrs.orq_size].flags == 0 ? 1 : 0;
}

static inline struct siw_sqe *irq_alloc_free(struct siw_qp *qp)
{
	struct siw_sqe *irq_e = &qp->irq[qp->irq_put % qp->attrs.irq_size];

	if (READ_ONCE(irq_e->flags) == 0) {
		qp->irq_put++;
		return irq_e;
	}
	return NULL;
}

static inline __wsum siw_csum_update(const void *buff, int len, __wsum sum)
{
	return (__force __wsum)crc32c((__force __u32)sum, buff, len);
}

static inline __wsum siw_csum_combine(__wsum csum, __wsum csum2, int offset,
				      int len)
{
	return (__force __wsum)__crc32c_le_combine((__force __u32)csum,
						   (__force __u32)csum2, len);
}

static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
{
	const struct skb_checksum_ops siw_cs_ops = {
		.update = siw_csum_update,
		.combine = siw_csum_combine,
	};
	__wsum crc = *(u32 *)shash_desc_ctx(srx->mpa_crc_hd);

	crc = __skb_checksum(srx->skb, srx->skb_offset, len, crc,
			     &siw_cs_ops);
	*(u32 *)shash_desc_ctx(srx->mpa_crc_hd) = crc;
}

#define siw_dbg(ibdev, fmt, ...) \
	ibdev_dbg(ibdev, "%s: " fmt, __func__, ##__VA_ARGS__)

#define siw_dbg_qp(qp, fmt, ...) \
	ibdev_dbg(&qp->sdev->base_dev, "QP[%u] %s: " fmt, qp_id(qp), __func__, \
		  ##__VA_ARGS__)

#define siw_dbg_cq(cq, fmt, ...) \
	ibdev_dbg(cq->base_cq.device, "CQ[%u] %s: " fmt, cq->id, __func__, \
		  ##__VA_ARGS__)

#define siw_dbg_pd(pd, fmt, ...) \
	ibdev_dbg(pd->device, "PD[%u] %s: " fmt, pd->res.id, __func__, \
		  ##__VA_ARGS__)

#define siw_dbg_mem(mem, fmt, ...) \
	ibdev_dbg(&mem->sdev->base_dev, \
		  "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)

#define siw_dbg_cep(cep, fmt, ...) \
	ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \
		  cep, __func__, ##__VA_ARGS__)

void siw_cq_flush(struct siw_cq *cq);
void siw_sq_flush(struct siw_qp *qp);
void siw_rq_flush(struct siw_qp *qp);
int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc);

#endif