/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _HNS_ROCE_DEVICE_H
#define _HNS_ROCE_DEVICE_H

#include <rdma/ib_verbs.h>

#define DRV_NAME "hns_roce"

#define HNS_ROCE_HW_VER1	('h' << 24 | 'i' << 16 | '0' << 8 | '6')

#define MAC_ADDR_OCTET_NUM			6
#define HNS_ROCE_MAX_MSG_LEN			0x80000000

/* Round a up to the next multiple of b */
#define HNS_ROCE_ALOGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))

#define HNS_ROCE_IB_MIN_SQ_STRIDE		6

#define HNS_ROCE_BA_SIZE			(32 * 4096)

/* Hardware specification only for v1 engine */
#define HNS_ROCE_MIN_CQE_NUM			0x40
#define HNS_ROCE_MIN_WQE_NUM			0x20

/* Hardware specification only for v1 engine */
#define HNS_ROCE_MAX_INNER_MTPT_NUM		0x7
#define HNS_ROCE_MAX_MTPT_PBL_NUM		0x100000

#define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS	20
/* Poll every 20 ms, up to 5000 ms in total */
#define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT	\
	(5000 / HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS)
#define HNS_ROCE_CQE_WCMD_EMPTY_BIT		0x2
#define HNS_ROCE_MIN_CQE_CNT			16

#define HNS_ROCE_MAX_IRQ_NUM			128

#define EQ_ENABLE				1
#define EQ_DISABLE				0

#define HNS_ROCE_CEQ				0
#define HNS_ROCE_AEQ				1

#define HNS_ROCE_CEQ_ENTRY_SIZE			0x4
#define HNS_ROCE_AEQ_ENTRY_SIZE			0x10

#define HNS_ROCE_SL_SHIFT			28
#define HNS_ROCE_TCLASS_SHIFT			20
#define HNS_ROCE_FLOW_LABEL_MASK		0xfffff

#define HNS_ROCE_MAX_PORTS			6
#define HNS_ROCE_MAX_GID_NUM			16
#define HNS_ROCE_GID_SIZE			16

#define HNS_ROCE_HOP_NUM_0			0xff

#define BITMAP_NO_RR				0
#define BITMAP_RR				1

#define MR_TYPE_MR				0x00
#define MR_TYPE_DMA				0x03

#define PKEY_ID					0xffff
#define GUID_LEN				8
#define NODE_DESC_SIZE				64
#define DB_REG_OFFSET				0x1000

#define SERV_TYPE_RC				0
#define SERV_TYPE_RD				1
#define SERV_TYPE_UC				2
#define SERV_TYPE_UD				3

/* Page-shift offset reported to hardware when PAGE_SIZE is larger than 4 KB */
#define PG_SHIFT_OFFSET				(PAGE_SHIFT - 12)

#define PAGES_SHIFT_8				8
#define PAGES_SHIFT_16				16
#define PAGES_SHIFT_24				24
#define PAGES_SHIFT_32				32

enum {
	HNS_ROCE_SUPPORT_RQ_RECORD_DB = 1 << 0,
	HNS_ROCE_SUPPORT_SQ_RECORD_DB = 1 << 1,
};

enum {
	HNS_ROCE_SUPPORT_CQ_RECORD_DB = 1 << 0,
};
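/*
 * Editor's example (not part of the original header): a minimal sketch of
 * how HNS_ROCE_ALOGN_UP rounds a byte count up to an alignment boundary.
 * The helper name and the 4096 constant are illustrative assumptions, not
 * driver API.
 */
static inline u32 hns_roce_example_queue_bytes(u32 wqe_cnt, u32 wqe_shift)
{
	/* e.g. wqe_cnt = 100, wqe_shift = 6: 6400 bytes rounds up to 8192 */
	return HNS_ROCE_ALOGN_UP(wqe_cnt << wqe_shift, 4096);
}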
enum hns_roce_qp_state {
	HNS_ROCE_QP_STATE_RST,
	HNS_ROCE_QP_STATE_INIT,
	HNS_ROCE_QP_STATE_RTR,
	HNS_ROCE_QP_STATE_RTS,
	HNS_ROCE_QP_STATE_SQD,
	HNS_ROCE_QP_STATE_ERR,
	HNS_ROCE_QP_NUM_STATE,
};

enum hns_roce_event {
	HNS_ROCE_EVENT_TYPE_PATH_MIG                  = 0x01,
	HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED           = 0x02,
	HNS_ROCE_EVENT_TYPE_COMM_EST                  = 0x03,
	HNS_ROCE_EVENT_TYPE_SQ_DRAINED                = 0x04,
	HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR            = 0x05,
	HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR    = 0x06,
	HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR     = 0x07,
	HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH           = 0x08,
	HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH        = 0x09,
	HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR           = 0x0a,
	HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR           = 0x0b,
	HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW               = 0x0c,
	HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID             = 0x0d,
	HNS_ROCE_EVENT_TYPE_PORT_CHANGE               = 0x0f,
	/* 0x10 and 0x11 are unused in the current application */
	HNS_ROCE_EVENT_TYPE_DB_OVERFLOW               = 0x12,
	HNS_ROCE_EVENT_TYPE_MB                        = 0x13,
	HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW              = 0x14,
	HNS_ROCE_EVENT_TYPE_FLR                       = 0x15,
};

/* Local Work Queue Catastrophic Error, subtype 0x5 */
enum {
	HNS_ROCE_LWQCE_QPC_ERROR		= 1,
	HNS_ROCE_LWQCE_MTU_ERROR		= 2,
	HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR	= 3,
	HNS_ROCE_LWQCE_WQE_ADDR_ERROR		= 4,
	HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR	= 5,
	HNS_ROCE_LWQCE_SL_ERROR			= 6,
	HNS_ROCE_LWQCE_PORT_ERROR		= 7,
};

/* Local Access Violation Work Queue Error, subtype 0x7 */
enum {
	HNS_ROCE_LAVWQE_R_KEY_VIOLATION		= 1,
	HNS_ROCE_LAVWQE_LENGTH_ERROR		= 2,
	HNS_ROCE_LAVWQE_VA_ERROR		= 3,
	HNS_ROCE_LAVWQE_PD_ERROR		= 4,
	HNS_ROCE_LAVWQE_RW_ACC_ERROR		= 5,
	HNS_ROCE_LAVWQE_KEY_STATE_ERROR		= 6,
	HNS_ROCE_LAVWQE_MR_OPERATION_ERROR	= 7,
};

/* Doorbell overflow subtypes */
enum {
	HNS_ROCE_DB_SUBTYPE_SDB_OVF		= 1,
	HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF		= 2,
	HNS_ROCE_DB_SUBTYPE_ODB_OVF		= 3,
	HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF		= 4,
	HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP		= 5,
	HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP		= 6,
};

enum {
	/* RQ & SRQ related operations */
	HNS_ROCE_OPCODE_SEND_DATA_RECEIVE	= 0x06,
	HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE	= 0x07,
};

enum {
	HNS_ROCE_CAP_FLAG_REREG_MR		= BIT(0),
	HNS_ROCE_CAP_FLAG_ROCE_V1_V2		= BIT(1),
	HNS_ROCE_CAP_FLAG_RQ_INLINE		= BIT(2),
	HNS_ROCE_CAP_FLAG_RECORD_DB		= BIT(3),
	HNS_ROCE_CAP_FLAG_SQ_RECORD_DB		= BIT(4),
};

enum hns_roce_mtt_type {
	MTT_TYPE_WQE,
	MTT_TYPE_CQE,
};

enum {
	HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
};

#define HNS_ROCE_CMD_SUCCESS			1

#define HNS_ROCE_PORT_DOWN			0
#define HNS_ROCE_PORT_UP			1

#define HNS_ROCE_MTT_ENTRY_PER_SEG		8

#define PAGE_ADDR_SHIFT				12

struct hns_roce_uar {
	u64		pfn;
	unsigned long	index;
	unsigned long	logic_idx;
};

struct hns_roce_vma_data {
	struct list_head list;
	struct vm_area_struct *vma;
	struct mutex *vma_list_mutex;
};

struct hns_roce_ucontext {
	struct ib_ucontext	ibucontext;
	struct hns_roce_uar	uar;
	struct list_head	page_list;
	struct mutex		page_mutex;
	struct list_head	vma_list;
	struct mutex		vma_list_mutex;
};

struct hns_roce_pd {
	struct ib_pd		ibpd;
	unsigned long		pdn;
};

struct hns_roce_bitmap {
	/* Index from which the next free-bit search starts */
	unsigned long		last;
	unsigned long		top;
	unsigned long		max;
	unsigned long		reserved_top;
	unsigned long		mask;
	spinlock_t		lock;
	unsigned long		*table;
};
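/*
 * Usage sketch (editor's note): the bitmap hands out object numbers such
 * as PD numbers via the alloc/free helpers declared near the end of this
 * header. Error handling is elided, and "hr_dev"/"pd" stand in for a
 * device and a hns_roce_pd:
 *
 *	unsigned long pdn;
 *
 *	if (!hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, &pdn))
 *		pd->pdn = pdn;
 *	...
 *	hns_roce_bitmap_free(&hr_dev->pd_bitmap, pd->pdn, BITMAP_NO_RR);
 */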
/*
 * Buddy allocator over a stack of per-order bitmaps.
 *
 * The bitmap at a given order holds 1 << (max_order - order) bits, so the
 * order-0 bitmap is the largest and the max_order bitmap holds a single bit.
 * Each bit represents the free/used state of one block of its order:
 * bit = 1 means idle and available, bit = 0 means not available.
 * Initially every bit is 0 except the single max_order bit, which is 1
 * (one maximal free block covering the whole range).
 */
struct hns_roce_buddy {
	/* Members point to every order level bitmap */
	unsigned long **bits;
	/* Number of available bits in each order level bitmap */
	u32	       *num_free;
	int		max_order;
	spinlock_t	lock;
};

/* For Hardware Entry Memory */
struct hns_roce_hem_table {
	/* HEM type: 0 = qpc, 1 = mtt, 2 = cqc, 3 = srq, 4 = other */
	u32		type;
	/* HEM array element count */
	unsigned long	num_hem;
	/* Total number of objects recorded by HEM entries */
	unsigned long	num_obj;
	/* Size of a single object */
	unsigned long	obj_size;
	unsigned long	table_chunk_size;
	int		lowmem;
	struct mutex	mutex;
	struct hns_roce_hem **hem;
	u64		**bt_l1;
	dma_addr_t	*bt_l1_dma_addr;
	u64		**bt_l0;
	dma_addr_t	*bt_l0_dma_addr;
};

struct hns_roce_mtt {
	unsigned long		first_seg;
	int			order;
	int			page_shift;
	enum hns_roce_mtt_type	mtt_type;
};

/* Only 4K page size is supported for MR registration */
#define MR_SIZE_4K 0

struct hns_roce_mr {
	struct ib_mr		ibmr;
	struct ib_umem		*umem;
	u64			iova;	/* MR's virtual original addr */
	u64			size;	/* Address range of MR */
	u32			key;	/* Key of MR */
	u32			pd;	/* PD num of MR */
	u32			access;	/* Access permission of MR */
	int			enabled; /* MR's active status */
	int			type;	/* MR's register type */
	u64			*pbl_buf;	/* MR's PBL space */
	dma_addr_t		pbl_dma_addr;	/* MR's PBL space PA */
	u32			pbl_size;	/* PA number in the PBL */
	u64			pbl_ba;		/* page table address */
	u32			l0_chunk_last_num;	/* L0 last number */
	u32			l1_chunk_last_num;	/* L1 last number */
	u64			**pbl_bt_l2;	/* PBL BT L2 */
	u64			**pbl_bt_l1;	/* PBL BT L1 */
	u64			*pbl_bt_l0;	/* PBL BT L0 */
	dma_addr_t		*pbl_l2_dma_addr;	/* PBL BT L2 dma addr */
	dma_addr_t		*pbl_l1_dma_addr;	/* PBL BT L1 dma addr */
	dma_addr_t		pbl_l0_dma_addr;	/* PBL BT L0 dma addr */
	u32			pbl_ba_pg_sz;	/* BT chunk page size */
	u32			pbl_buf_pg_sz;	/* buf chunk page size */
	u32			pbl_hop_num;	/* multi-hop number */
};

struct hns_roce_mr_table {
	struct hns_roce_bitmap		mtpt_bitmap;
	struct hns_roce_buddy		mtt_buddy;
	struct hns_roce_hem_table	mtt_table;
	struct hns_roce_hem_table	mtpt_table;
	struct hns_roce_buddy		mtt_cqe_buddy;
	struct hns_roce_hem_table	mtt_cqe_table;
};

struct hns_roce_wq {
	u64		*wrid;	   /* Work request ID */
	spinlock_t	lock;
	int		wqe_cnt;   /* WQE num */
	u32		max_post;
	int		max_gs;
	int		offset;
	int		wqe_shift; /* log2 of WQE size */
	u32		head;
	u32		tail;
	void __iomem	*db_reg_l;
};

struct hns_roce_sge {
	int	sge_cnt;   /* SGE num */
	int	offset;
	int	sge_shift; /* log2 of SGE size */
};

struct hns_roce_buf_list {
	void		*buf;
	dma_addr_t	map;
};

struct hns_roce_buf {
	struct hns_roce_buf_list	direct;
	struct hns_roce_buf_list	*page_list;
	int				nbufs;
	u32				npages;
	int				page_shift;
};
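/*
 * Worked example (editor's note): with page_shift = 12 (4K pages) and
 * nbufs > 1, byte offset 5000 resolves to page_list[1] at byte 904, since
 * 5000 >> 12 == 1 and 5000 & 4095 == 904. With nbufs == 1 the buffer is a
 * single direct allocation and the offset indexes straight into
 * direct.buf; see hns_roce_buf_offset() later in this header.
 */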
struct hns_roce_db_pgdir {
	struct list_head	list;
	DECLARE_BITMAP(order0, HNS_ROCE_DB_PER_PAGE);
	DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / 2);
	unsigned long		*bits[2];
	u32			*page;
	dma_addr_t		db_dma;
};

struct hns_roce_user_db_page {
	struct list_head	list;
	struct ib_umem		*umem;
	unsigned long		user_virt;
	refcount_t		refcount;
};

struct hns_roce_db {
	u32		*db_record;
	union {
		struct hns_roce_db_pgdir *pgdir;
		struct hns_roce_user_db_page *user_page;
	} u;
	dma_addr_t	dma;
	void		*virt_addr;
	int		index;
	int		order;
};

struct hns_roce_cq_buf {
	struct hns_roce_buf hr_buf;
	struct hns_roce_mtt hr_mtt;
};

struct hns_roce_cq {
	struct ib_cq		ib_cq;
	struct hns_roce_cq_buf	hr_buf;
	struct hns_roce_db	db;
	u8			db_en;
	spinlock_t		lock;
	struct ib_umem		*umem;
	void (*comp)(struct hns_roce_cq *cq);
	void (*event)(struct hns_roce_cq *cq, enum hns_roce_event event_type);

	struct hns_roce_uar	*uar;
	u32			cq_depth;
	u32			cons_index;
	u32			*set_ci_db;
	void __iomem		*cq_db_l;
	u16			*tptr_addr;
	int			arm_sn;
	unsigned long		cqn;
	u32			vector;
	atomic_t		refcount;
	struct completion	free;
};

struct hns_roce_srq {
	struct ib_srq		ibsrq;
	int			srqn;
};

struct hns_roce_uar_table {
	struct hns_roce_bitmap bitmap;
};

struct hns_roce_qp_table {
	struct hns_roce_bitmap		bitmap;
	spinlock_t			lock;
	struct hns_roce_hem_table	qp_table;
	struct hns_roce_hem_table	irrl_table;
	struct hns_roce_hem_table	trrl_table;
};

struct hns_roce_cq_table {
	struct hns_roce_bitmap		bitmap;
	spinlock_t			lock;
	struct radix_tree_root		tree;
	struct hns_roce_hem_table	table;
};

struct hns_roce_raq_table {
	struct hns_roce_buf_list	*e_raq_buf;
};

struct hns_roce_av {
	__le32		port_pd;
	u8		gid_index;
	u8		stat_rate;
	u8		hop_limit;
	__le32		sl_tclass_flowlabel;
	u8		dgid[HNS_ROCE_GID_SIZE];
	u8		mac[6];
	__le16		vlan;
};

struct hns_roce_ah {
	struct ib_ah		ibah;
	struct hns_roce_av	av;
};

struct hns_roce_cmd_context {
	struct completion	done;
	int			result;
	int			next;
	u64			out_param;
	u16			token;
};

struct hns_roce_cmdq {
	struct dma_pool		*pool;
	struct mutex		hcr_mutex;
	struct semaphore	poll_sem;
	/*
	 * Event mode: limits the number of in-flight commands to max_cmds
	 * and protects the command context region shared with users.
	 */
	struct semaphore	event_sem;
	int			max_cmds;
	spinlock_t		context_lock;
	int			free_head;
	struct hns_roce_cmd_context *context;
	/*
	 * Mask applied to command tokens; derived from max_cmds, which is
	 * a power of 2.
	 */
	u16			token_mask;
	/*
	 * Nonzero while the command interface runs in event mode. The
	 * driver starts in poll mode, switches to event mode once the
	 * command event queue is ready, and switches back to poll mode
	 * (non-event mode) when the device is closed.
	 */
	u8			use_events;
	u8			toggle;
};

struct hns_roce_cmd_mailbox {
	void		*buf;
	dma_addr_t	dma;
};

struct hns_roce_dev;

struct hns_roce_rinl_sge {
	void		*addr;
	u32		len;
};

struct hns_roce_rinl_wqe {
	struct hns_roce_rinl_sge *sg_list;
	u32		sge_cnt;
};

struct hns_roce_rinl_buf {
	struct hns_roce_rinl_wqe *wqe_list;
	u32		wqe_cnt;
};
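/*
 * Sketch (editor's note): in event mode a command completion carries the
 * token of the command it finishes, and the waiting context is found by
 * masking the token; roughly:
 *
 *	struct hns_roce_cmd_context *context =
 *		&hr_dev->cmd.context[token & hr_dev->cmd.token_mask];
 *
 *	context->result = (status == HNS_ROCE_CMD_SUCCESS) ? 0 : -EIO;
 *	context->out_param = out_param;
 *	complete(&context->done);
 *
 * The authoritative version is hns_roce_cmd_event() in hns_roce_cmd.c.
 */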
struct hns_roce_qp {
	struct ib_qp		ibqp;
	struct hns_roce_buf	hr_buf;
	struct hns_roce_wq	rq;
	struct hns_roce_db	rdb;
	struct hns_roce_db	sdb;
	u8			rdb_en;
	u8			sdb_en;
	u32			doorbell_qpn;
	__le32			sq_signal_bits;
	u32			sq_next_wqe;
	int			sq_max_wqes_per_wr;
	int			sq_spare_wqes;
	struct hns_roce_wq	sq;

	struct ib_umem		*umem;
	struct hns_roce_mtt	mtt;
	u32			buff_size;
	struct mutex		mutex;
	u8			port;
	u8			phy_port;
	u8			sl;
	u8			resp_depth;
	u8			state;
	u32			access_flags;
	u32			atomic_rd_en;
	u32			pkey_index;
	u32			qkey;
	void			(*event)(struct hns_roce_qp *qp,
					 enum hns_roce_event event_type);
	unsigned long		qpn;

	atomic_t		refcount;
	struct completion	free;

	struct hns_roce_sge	sge;
	u32			next_sge;

	struct hns_roce_rinl_buf rq_inl_buf;
};

struct hns_roce_sqp {
	struct hns_roce_qp	hr_qp;
};

struct hns_roce_ib_iboe {
	spinlock_t		lock;
	struct net_device	*netdevs[HNS_ROCE_MAX_PORTS];
	struct notifier_block	nb;
	u8			phy_port[HNS_ROCE_MAX_PORTS];
};

enum {
	HNS_ROCE_EQ_STAT_INVALID  = 0,
	HNS_ROCE_EQ_STAT_VALID    = 2,
};

struct hns_roce_ceqe {
	u32		comp;
};

struct hns_roce_aeqe {
	__le32 asyn;
	union {
		struct {
			__le32 qp;
			u32 rsv0;
			u32 rsv1;
		} qp_event;

		struct {
			__le32 cq;
			u32 rsv0;
			u32 rsv1;
		} cq_event;

		struct {
			__le32 ceqe;
			u32 rsv0;
			u32 rsv1;
		} ce_event;

		struct {
			__le64	out_param;
			__le16	token;
			u8	status;
			u8	rsv0;
		} __packed cmd;
	} event;
};

struct hns_roce_eq {
	struct hns_roce_dev		*hr_dev;
	void __iomem			*doorbell;

	int				type_flag; /* AEQ: 1, CEQ: 0 */
	int				eqn;
	u32				entries;
	int				log_entries;
	int				eqe_size;
	int				irq;
	int				log_page_size;
	int				cons_index;
	struct hns_roce_buf_list	*buf_list;
	int				over_ignore;
	int				coalesce;
	int				arm_st;
	u64				eqe_ba;
	int				eqe_ba_pg_sz;
	int				eqe_buf_pg_sz;
	int				hop_num;
	u64				*bt_l0;	 /* Base address table for L0 */
	u64				**bt_l1; /* Base address table for L1 */
	u64				**buf;
	dma_addr_t			l0_dma;
	dma_addr_t			*l1_dma;
	dma_addr_t			*buf_dma;
	u32				l0_last_num; /* L0 last chunk num */
	u32				l1_last_num; /* L1 last chunk num */
	int				eq_max_cnt;
	int				eq_period;
	int				shift;
	dma_addr_t			cur_eqe_ba;
	dma_addr_t			nxt_eqe_ba;
	int				event_type;
	int				sub_type;
};

struct hns_roce_eq_table {
	struct hns_roce_eq	*eq;
	void __iomem		**eqc_base; /* only for hw v1 */
};
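/*
 * Sketch (editor's note): an async EQE carries one of the union members
 * above; a handler typically switches on the event type and, for mailbox
 * completions, forwards the payload roughly as follows:
 *
 *	case HNS_ROCE_EVENT_TYPE_MB:
 *		hns_roce_cmd_event(hr_dev,
 *				   le16_to_cpu(aeqe->event.cmd.token),
 *				   aeqe->event.cmd.status,
 *				   le64_to_cpu(aeqe->event.cmd.out_param));
 *		break;
 *
 * hns_roce_cmd_event() is declared later in this header.
 */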
struct hns_roce_caps {
	u8		num_ports;
	int		gid_table_len[HNS_ROCE_MAX_PORTS];
	int		pkey_table_len[HNS_ROCE_MAX_PORTS];
	int		local_ca_ack_delay;
	int		num_uars;
	u32		phy_num_uars;
	u32		max_sq_sg;	/* 2 */
	u32		max_sq_inline;	/* 32 */
	u32		max_rq_sg;	/* 2 */
	int		num_qps;	/* 256k */
	u32		max_wqes;	/* 16k */
	u32		max_sq_desc_sz;	/* 64 */
	u32		max_rq_desc_sz;	/* 64 */
	u32		max_srq_desc_sz;
	int		max_qp_init_rdma;
	int		max_qp_dest_rdma;
	int		num_cqs;
	int		max_cqes;
	int		min_cqes;
	u32		min_wqes;
	int		reserved_cqs;
	int		num_aeq_vectors;	/* 1 */
	int		num_comp_vectors;
	int		num_other_vectors;
	int		num_mtpts;
	u32		num_mtt_segs;
	u32		num_cqe_segs;
	int		reserved_mrws;
	int		reserved_uars;
	int		num_pds;
	int		reserved_pds;
	u32		mtt_entry_sz;
	u32		cq_entry_sz;
	u32		page_size_cap;
	u32		reserved_lkey;
	int		mtpt_entry_sz;
	int		qpc_entry_sz;
	int		irrl_entry_sz;
	int		trrl_entry_sz;
	int		cqc_entry_sz;
	u32		pbl_ba_pg_sz;
	u32		pbl_buf_pg_sz;
	u32		pbl_hop_num;
	int		aeqe_depth;
	int		ceqe_depth;
	enum ib_mtu	max_mtu;
	u32		qpc_bt_num;
	u32		srqc_bt_num;
	u32		cqc_bt_num;
	u32		mpt_bt_num;
	u32		qpc_ba_pg_sz;
	u32		qpc_buf_pg_sz;
	u32		qpc_hop_num;
	u32		srqc_ba_pg_sz;
	u32		srqc_buf_pg_sz;
	u32		srqc_hop_num;
	u32		cqc_ba_pg_sz;
	u32		cqc_buf_pg_sz;
	u32		cqc_hop_num;
	u32		mpt_ba_pg_sz;
	u32		mpt_buf_pg_sz;
	u32		mpt_hop_num;
	u32		mtt_ba_pg_sz;
	u32		mtt_buf_pg_sz;
	u32		mtt_hop_num;
	u32		cqe_ba_pg_sz;
	u32		cqe_buf_pg_sz;
	u32		cqe_hop_num;
	u32		eqe_ba_pg_sz;
	u32		eqe_buf_pg_sz;
	u32		eqe_hop_num;
	u32		sl_num;
	u32		tsq_buf_pg_sz;
	u32		tpq_buf_pg_sz;
	u32		chunk_sz;	/* chunk size in non-multihop mode */
	u64		flags;
};

struct hns_roce_work {
	struct hns_roce_dev *hr_dev;
	struct work_struct work;
	u32 qpn;
	int event_type;
	int sub_type;
};

struct hns_roce_hw {
	int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
	int (*cmq_init)(struct hns_roce_dev *hr_dev);
	void (*cmq_exit)(struct hns_roce_dev *hr_dev);
	int (*hw_profile)(struct hns_roce_dev *hr_dev);
	int (*hw_init)(struct hns_roce_dev *hr_dev);
	void (*hw_exit)(struct hns_roce_dev *hr_dev);
	int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param,
			 u64 out_param, u32 in_modifier, u8 op_modifier,
			 u16 op, u16 token, int event);
	int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout);
	int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
		       const union ib_gid *gid, const struct ib_gid_attr *attr);
	int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
	void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
			enum ib_mtu mtu);
	int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr,
			  unsigned long mtpt_idx);
	int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
				struct hns_roce_mr *mr, int flags, u32 pdn,
				int mr_access_flags, u64 iova, u64 size,
				void *mb_buf);
	void (*write_cqc)(struct hns_roce_dev *hr_dev,
			  struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
			  dma_addr_t dma_handle, int nent, u32 vector);
	int (*set_hem)(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, int obj,
		       int step_idx);
	int (*clear_hem)(struct hns_roce_dev *hr_dev,
			 struct hns_roce_hem_table *table, int obj,
			 int step_idx);
	int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
	int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
			 int attr_mask, enum ib_qp_state cur_state,
			 enum ib_qp_state new_state);
	int (*destroy_qp)(struct ib_qp *ibqp);
	int (*post_send)(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			 const struct ib_send_wr **bad_wr);
	int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
			 const struct ib_recv_wr **bad_recv_wr);
	int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
	int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
	int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
	int (*destroy_cq)(struct ib_cq *ibcq);
	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
	int (*init_eq)(struct hns_roce_dev *hr_dev);
	void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
};
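/*
 * Sketch (editor's note): each hardware generation provides one of these
 * operation tables and the core dispatches through hr_dev->hw. A v1
 * backend would fill it in along these lines (the handler names are
 * illustrative; the real tables live in hns_roce_hw_v1.c and
 * hns_roce_hw_v2.c):
 *
 *	static const struct hns_roce_hw hns_roce_hw_v1 = {
 *		.reset		= hns_roce_v1_reset,
 *		.hw_profile	= hns_roce_v1_profile,
 *		.post_send	= hns_roce_v1_post_send,
 *		...
 *	};
 */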
struct hns_roce_dev {
	struct ib_device	ib_dev;
	struct platform_device	*pdev;
	struct pci_dev		*pci_dev;
	struct device		*dev;
	struct hns_roce_uar	priv_uar;
	const char		*irq_names[HNS_ROCE_MAX_IRQ_NUM];
	spinlock_t		sm_lock;
	spinlock_t		bt_cmd_lock;
	bool			active;
	bool			is_reset;
	struct hns_roce_ib_iboe iboe;

	struct list_head	pgdir_list;
	struct mutex		pgdir_mutex;
	int			irq[HNS_ROCE_MAX_IRQ_NUM];
	u8 __iomem		*reg_base;
	struct hns_roce_caps	caps;
	struct radix_tree_root	qp_table_tree;

	unsigned char		dev_addr[HNS_ROCE_MAX_PORTS][MAC_ADDR_OCTET_NUM];
	u64			sys_image_guid;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_rev;
	void __iomem		*priv_addr;

	struct hns_roce_cmdq	cmd;
	struct hns_roce_bitmap	  pd_bitmap;
	struct hns_roce_uar_table uar_table;
	struct hns_roce_mr_table  mr_table;
	struct hns_roce_cq_table  cq_table;
	struct hns_roce_qp_table  qp_table;
	struct hns_roce_eq_table  eq_table;

	int			cmd_mod;
	int			loop_idc;
	u32			sdb_offset;
	u32			odb_offset;
	dma_addr_t		tptr_dma_addr;	/* only for hw v1 */
	u32			tptr_size;	/* only for hw v1 */
	const struct hns_roce_hw *hw;
	void			*priv;
	struct workqueue_struct *irq_workq;
};

static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
{
	return container_of(ib_dev, struct hns_roce_dev, ib_dev);
}

static inline struct hns_roce_ucontext
			*to_hr_ucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
}

static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct hns_roce_pd, ibpd);
}

static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct hns_roce_ah, ibah);
}

static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct hns_roce_mr, ibmr);
}

static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct hns_roce_qp, ibqp);
}

static inline struct hns_roce_cq *to_hr_cq(struct ib_cq *ib_cq)
{
	return container_of(ib_cq, struct hns_roce_cq, ib_cq);
}

static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}

static inline struct hns_roce_sqp *hr_to_hr_sqp(struct hns_roce_qp *hr_qp)
{
	return container_of(hr_qp, struct hns_roce_sqp, hr_qp);
}

static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
{
	__raw_writeq(*(u64 *) val, dest);
}

static inline struct hns_roce_qp
	*__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
{
	return radix_tree_lookup(&hr_dev->qp_table_tree,
				 qpn & (hr_dev->caps.num_qps - 1));
}

static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
{
	u32 page_size = 1 << buf->page_shift;

	if (buf->nbufs == 1)
		return (char *)(buf->direct.buf) + offset;
	else
		return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
		       (offset & (page_size - 1));
}
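/*
 * Worked example (editor's note): __hns_roce_qp_lookup() masks the QPN
 * with caps.num_qps - 1, which assumes num_qps is a power of two. With
 * num_qps = 256K (0x40000), QPN 0x40005 and QPN 0x5 map to the same
 * radix tree slot (5), so distinct QPNs are expected to stay within the
 * num_qps range.
 */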
int hns_roce_init_uar_table(struct hns_roce_dev *dev);
int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
void hns_roce_uar_free(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
void hns_roce_cleanup_uar_table(struct hns_roce_dev *dev);

int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
			u64 out_param);
int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);

int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
		      struct hns_roce_mtt *mtt);
void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev,
			  struct hns_roce_mtt *mtt);
int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
			   struct hns_roce_mtt *mtt, struct hns_roce_buf *buf);

int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);

void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);

int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
			  int rr);
int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
			 u32 reserved_bot, u32 reserved_top);
void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap);
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);
int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
				int align, unsigned long *obj);
void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
				unsigned long obj, int cnt,
				int rr);

struct ib_ah *hns_roce_create_ah(struct ib_pd *pd,
				 struct rdma_ah_attr *ah_attr,
				 struct ib_udata *udata);
int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int hns_roce_destroy_ah(struct ib_ah *ah);

struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int hns_roce_dealloc_pd(struct ib_pd *pd);

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata);
int hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length,
			   u64 virt_addr, int mr_access_flags,
			   struct ib_pd *pd, struct ib_udata *udata);
int hns_roce_dereg_mr(struct ib_mr *ibmr);
int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
		       struct hns_roce_cmd_mailbox *mailbox,
		       unsigned long mpt_index);
unsigned long key_to_hw_index(u32 key);

void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
		       struct hns_roce_buf *buf);
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
		       struct hns_roce_buf *buf, u32 page_shift);

int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mtt *mtt, struct ib_umem *umem);

struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata);
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata);
void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n);
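/*
 * Sketch (editor's note): the WQE getters above map a WQE index to an
 * address inside the QP buffer; the receive-side variant is roughly:
 *
 *	return hns_roce_buf_offset(&hr_qp->hr_buf,
 *				   hr_qp->rq.offset +
 *				   (n << hr_qp->rq.wqe_shift));
 *
 * See hns_roce_qp.c for the actual definitions.
 */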
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
			  struct ib_cq *ib_cq);
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
		       struct hns_roce_cq *recv_cq);
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq);
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
			       int cnt);
__be32 send_ieth(const struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);

struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
				    const struct ib_cq_init_attr *attr,
				    struct ib_ucontext *context,
				    struct ib_udata *udata);

int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq);
void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);

int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
			 struct hns_roce_db *db);
void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
			    struct hns_roce_db *db);
int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
		      int order);
void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);

void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);

#endif /* _HNS_ROCE_DEVICE_H */