/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _HNS_ROCE_DEVICE_H
#define _HNS_ROCE_DEVICE_H

#include <rdma/ib_verbs.h>

#define DRV_NAME "hns_roce"

#define HNS_ROCE_HW_VER1	('h' << 24 | 'i' << 16 | '0' << 8 | '6')

#define MAC_ADDR_OCTET_NUM			6
#define HNS_ROCE_MAX_MSG_LEN			0x80000000

#define HNS_ROCE_ALOGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))
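/*
 * Example: HNS_ROCE_ALOGN_UP(100, 64) == ((100 + 63) / 64) * 64 == 128.
 * The divide/multiply form rounds a up to the next multiple of b and
 * does not require b to be a power of two.
 */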
#define HNS_ROCE_IB_MIN_SQ_STRIDE		6

#define HNS_ROCE_BA_SIZE			(32 * 4096)

/* Hardware specification only for v1 engine */
#define HNS_ROCE_MIN_CQE_NUM			0x40
#define HNS_ROCE_MIN_WQE_NUM			0x20

/* Hardware specification only for v1 engine */
#define HNS_ROCE_MAX_INNER_MTPT_NUM		0x7
#define HNS_ROCE_MAX_MTPT_PBL_NUM		0x100000

#define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS	20
#define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT	\
	(5000 / HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS)
#define HNS_ROCE_CQE_WCMD_EMPTY_BIT		0x2
#define HNS_ROCE_MIN_CQE_CNT			16

#define HNS_ROCE_MAX_IRQ_NUM			128

#define EQ_ENABLE				1
#define EQ_DISABLE				0

#define HNS_ROCE_CEQ				0
#define HNS_ROCE_AEQ				1

#define HNS_ROCE_CEQ_ENTRY_SIZE			0x4
#define HNS_ROCE_AEQ_ENTRY_SIZE			0x10

/* 4G/4K = 1M */
#define HNS_ROCE_SL_SHIFT			28
#define HNS_ROCE_TCLASS_SHIFT			20
#define HNS_ROCE_FLOW_LABLE_MASK		0xfffff

#define HNS_ROCE_MAX_PORTS			6
#define HNS_ROCE_MAX_GID_NUM			16
#define HNS_ROCE_GID_SIZE			16

#define HNS_ROCE_HOP_NUM_0			0xff

#define BITMAP_NO_RR				0
#define BITMAP_RR				1

#define MR_TYPE_MR				0x00
#define MR_TYPE_DMA				0x03

#define PKEY_ID					0xffff
#define GUID_LEN				8
#define NODE_DESC_SIZE				64
#define DB_REG_OFFSET				0x1000

#define SERV_TYPE_RC				0
#define SERV_TYPE_RD				1
#define SERV_TYPE_UC				2
#define SERV_TYPE_UD				3

#define PAGES_SHIFT_8				8
#define PAGES_SHIFT_16				16
#define PAGES_SHIFT_24				24
#define PAGES_SHIFT_32				32

enum hns_roce_qp_state {
	HNS_ROCE_QP_STATE_RST,
	HNS_ROCE_QP_STATE_INIT,
	HNS_ROCE_QP_STATE_RTR,
	HNS_ROCE_QP_STATE_RTS,
	HNS_ROCE_QP_STATE_SQD,
	HNS_ROCE_QP_STATE_ERR,
	HNS_ROCE_QP_NUM_STATE,
};

enum hns_roce_event {
	HNS_ROCE_EVENT_TYPE_PATH_MIG			= 0x01,
	HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED		= 0x02,
	HNS_ROCE_EVENT_TYPE_COMM_EST			= 0x03,
	HNS_ROCE_EVENT_TYPE_SQ_DRAINED			= 0x04,
	HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR		= 0x05,
	HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR	= 0x06,
	HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR	= 0x07,
	HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH		= 0x08,
	HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH		= 0x09,
	HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR		= 0x0a,
	HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR		= 0x0b,
	HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW			= 0x0c,
	HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID		= 0x0d,
	HNS_ROCE_EVENT_TYPE_PORT_CHANGE			= 0x0f,
	/* 0x10 and 0x11 are currently unused */
	HNS_ROCE_EVENT_TYPE_DB_OVERFLOW			= 0x12,
	HNS_ROCE_EVENT_TYPE_MB				= 0x13,
	HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW		= 0x14,
	HNS_ROCE_EVENT_TYPE_FLR				= 0x15,
};

/* Local Work Queue Catastrophic Error, SUBTYPE 0x5 */
enum {
	HNS_ROCE_LWQCE_QPC_ERROR		= 1,
	HNS_ROCE_LWQCE_MTU_ERROR		= 2,
	HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR	= 3,
	HNS_ROCE_LWQCE_WQE_ADDR_ERROR		= 4,
	HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR	= 5,
	HNS_ROCE_LWQCE_SL_ERROR			= 6,
	HNS_ROCE_LWQCE_PORT_ERROR		= 7,
};

/* Local Access Violation Work Queue Error, SUBTYPE 0x7 */
enum {
	HNS_ROCE_LAVWQE_R_KEY_VIOLATION		= 1,
	HNS_ROCE_LAVWQE_LENGTH_ERROR		= 2,
	HNS_ROCE_LAVWQE_VA_ERROR		= 3,
	HNS_ROCE_LAVWQE_PD_ERROR		= 4,
	HNS_ROCE_LAVWQE_RW_ACC_ERROR		= 5,
	HNS_ROCE_LAVWQE_KEY_STATE_ERROR		= 6,
	HNS_ROCE_LAVWQE_MR_OPERATION_ERROR	= 7,
};

/* DOORBELL overflow subtype */
enum {
	HNS_ROCE_DB_SUBTYPE_SDB_OVF		= 1,
	HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF		= 2,
	HNS_ROCE_DB_SUBTYPE_ODB_OVF		= 3,
	HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF		= 4,
	HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP		= 5,
	HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP		= 6,
};

enum {
	/* RQ&SRQ related operations */
	HNS_ROCE_OPCODE_SEND_DATA_RECEIVE	= 0x06,
	HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE	= 0x07,
};

enum {
	HNS_ROCE_CAP_FLAG_REREG_MR		= BIT(0),
	HNS_ROCE_CAP_FLAG_ROCE_V1_V2		= BIT(1),
	HNS_ROCE_CAP_FLAG_RQ_INLINE		= BIT(2)
};

enum hns_roce_mtt_type {
	MTT_TYPE_WQE,
	MTT_TYPE_CQE,
};

#define HNS_ROCE_CMD_SUCCESS			1

#define HNS_ROCE_PORT_DOWN			0
#define HNS_ROCE_PORT_UP			1

#define HNS_ROCE_MTT_ENTRY_PER_SEG		8

#define PAGE_ADDR_SHIFT				12

struct hns_roce_uar {
	u64		pfn;
	unsigned long	index;
};

struct hns_roce_ucontext {
	struct ib_ucontext	ibucontext;
	struct hns_roce_uar	uar;
};

struct hns_roce_pd {
	struct ib_pd		ibpd;
	unsigned long		pdn;
};

struct hns_roce_bitmap {
	/* Position of the last allocated bit, where scanning resumes */
	unsigned long		last;
	unsigned long		top;
	unsigned long		max;
	unsigned long		reserved_top;
	unsigned long		mask;
	spinlock_t		lock;
	unsigned long		*table;
};

/*
 * The bitmap for order k holds 1 << (max_order - k) bits: the order-0
 * bitmap is the largest, and the max_order bitmap holds a single bit.
 * Each bit tracks the free/used state of one block of that order.
 *
 * Initially every bitmap is all zeroes except the single max_order bit,
 * which is set. Bit = 1 means the block is idle and available;
 * bit = 0 means it is not available.
 */
struct hns_roce_buddy {
	/* Per-order bitmaps, one for each order level */
	unsigned long **bits;
	/* Number of free bits in each order-level bitmap */
	u32            *num_free;
	int             max_order;
	spinlock_t      lock;
};
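/*
 * Usage sketch, assuming the buddy helpers implemented in
 * hns_roce_mr.c: allocating 2^order contiguous MTT segments scans the
 * order-level bitmaps from 'order' upward, splitting a larger free
 * block when necessary:
 *
 *	unsigned long seg;
 *
 *	if (hns_roce_buddy_alloc(&mr_table->mtt_buddy, order, &seg))
 *		return -ENOMEM;
 *	...
 *	hns_roce_buddy_free(&mr_table->mtt_buddy, seg, order);
 */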
/* For Hardware Entry Memory */
struct hns_roce_hem_table {
	/* HEM type: 0 = qpc, 1 = mtt, 2 = cqc, 3 = srq, 4 = other */
	u32		type;
	/* HEM array element num */
	unsigned long	num_hem;
	/* HEM entry record obj total num */
	unsigned long	num_obj;
	/* Single obj size */
	unsigned long	obj_size;
	unsigned long	table_chunk_size;
	int		lowmem;
	struct mutex	mutex;
	struct hns_roce_hem **hem;
	u64		**bt_l1;
	dma_addr_t	*bt_l1_dma_addr;
	u64		**bt_l0;
	dma_addr_t	*bt_l0_dma_addr;
};

struct hns_roce_mtt {
	unsigned long		first_seg;
	int			order;
	int			page_shift;
	enum hns_roce_mtt_type	mtt_type;
};

/* Only 4K page size is supported for MR registration */
#define MR_SIZE_4K 0

struct hns_roce_mr {
	struct ib_mr		ibmr;
	struct ib_umem		*umem;
	u64			iova;	/* MR's virtual original addr */
	u64			size;	/* Address range of MR */
	u32			key;	/* Key of MR */
	u32			pd;	/* PD num of MR */
	u32			access;	/* Access permission of MR */
	int			enabled; /* MR's active status */
	int			type;	/* MR's register type */
	u64			*pbl_buf;	/* MR's PBL space */
	dma_addr_t		pbl_dma_addr;	/* MR's PBL space PA */
	u32			pbl_size;	/* PA number in the PBL */
	u64			pbl_ba;		/* page table address */
	u32			l0_chunk_last_num;	/* L0 last number */
	u32			l1_chunk_last_num;	/* L1 last number */
	u64			**pbl_bt_l2;	/* PBL BT L2 */
	u64			**pbl_bt_l1;	/* PBL BT L1 */
	u64			*pbl_bt_l0;	/* PBL BT L0 */
	dma_addr_t		*pbl_l2_dma_addr;	/* PBL BT L2 dma addr */
	dma_addr_t		*pbl_l1_dma_addr;	/* PBL BT L1 dma addr */
	dma_addr_t		pbl_l0_dma_addr;	/* PBL BT L0 dma addr */
	u32			pbl_ba_pg_sz;	/* BT chunk page size */
	u32			pbl_buf_pg_sz;	/* buf chunk page size */
	u32			pbl_hop_num;	/* multi-hop number */
};

struct hns_roce_mr_table {
	struct hns_roce_bitmap		mtpt_bitmap;
	struct hns_roce_buddy		mtt_buddy;
	struct hns_roce_hem_table	mtt_table;
	struct hns_roce_hem_table	mtpt_table;
	struct hns_roce_buddy		mtt_cqe_buddy;
	struct hns_roce_hem_table	mtt_cqe_table;
};

struct hns_roce_wq {
	u64		*wrid;	/* Work request ID */
	spinlock_t	lock;
	int		wqe_cnt;	/* WQE num */
	u32		max_post;
	int		max_gs;
	int		offset;
	int		wqe_shift;	/* WQE size */
	u32		head;
	u32		tail;
	void __iomem	*db_reg_l;
};

struct hns_roce_sge {
	int		sge_cnt;	/* SGE num */
	int		offset;
	int		sge_shift;	/* SGE size */
};

struct hns_roce_buf_list {
	void		*buf;
	dma_addr_t	map;
};

struct hns_roce_buf {
	struct hns_roce_buf_list	direct;
	struct hns_roce_buf_list	*page_list;
	int				nbufs;
	u32				npages;
	int				page_shift;
};
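/*
 * Illustrative example: with page_shift = 12 (4 KB pages), a
 * multi-page buffer (nbufs > 1) resolves offset 0x2010 as
 * page_list[0x2010 >> 12].buf + (0x2010 & 0xfff), i.e.
 * page_list[2].buf + 0x10; a single-chunk buffer (nbufs == 1) is
 * simply direct.buf + offset. See hns_roce_buf_offset() below.
 */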
struct hns_roce_cq_buf {
	struct hns_roce_buf	hr_buf;
	struct hns_roce_mtt	hr_mtt;
};

struct hns_roce_cq {
	struct ib_cq			ib_cq;
	struct hns_roce_cq_buf		hr_buf;
	spinlock_t			lock;
	struct ib_umem			*umem;
	void (*comp)(struct hns_roce_cq *cq);
	void (*event)(struct hns_roce_cq *cq, enum hns_roce_event event_type);

	struct hns_roce_uar		*uar;
	u32				cq_depth;
	u32				cons_index;
	void __iomem			*cq_db_l;
	u16				*tptr_addr;
	int				arm_sn;
	unsigned long			cqn;
	u32				vector;
	atomic_t			refcount;
	struct completion		free;
};

struct hns_roce_srq {
	struct ib_srq		ibsrq;
	int			srqn;
};

struct hns_roce_uar_table {
	struct hns_roce_bitmap bitmap;
};

struct hns_roce_qp_table {
	struct hns_roce_bitmap		bitmap;
	spinlock_t			lock;
	struct hns_roce_hem_table	qp_table;
	struct hns_roce_hem_table	irrl_table;
	struct hns_roce_hem_table	trrl_table;
};

struct hns_roce_cq_table {
	struct hns_roce_bitmap		bitmap;
	spinlock_t			lock;
	struct radix_tree_root		tree;
	struct hns_roce_hem_table	table;
};

struct hns_roce_raq_table {
	struct hns_roce_buf_list	*e_raq_buf;
};

struct hns_roce_av {
	__le32		port_pd;
	u8		gid_index;
	u8		stat_rate;
	u8		hop_limit;
	__le32		sl_tclass_flowlabel;
	u8		dgid[HNS_ROCE_GID_SIZE];
	u8		mac[6];
	__le16		vlan;
};

struct hns_roce_ah {
	struct ib_ah		ibah;
	struct hns_roce_av	av;
};

struct hns_roce_cmd_context {
	struct completion	done;
	int			result;
	int			next;
	u64			out_param;
	u16			token;
};

struct hns_roce_cmdq {
	struct dma_pool		*pool;
	struct mutex		hcr_mutex;
	struct semaphore	poll_sem;
	/*
	 * Event mode: serializes access to the command contexts so that
	 * callers never exceed max_cmds or the region reserved for users
	 */
	struct semaphore	event_sem;
	int			max_cmds;
	spinlock_t		context_lock;
	int			free_head;
	struct hns_roce_cmd_context *context;
	/*
	 * Mask applied to command tokens; derived from max_cmds rounded
	 * to a power of two
	 */
	u16			token_mask;
	/*
	 * Whether commands complete via events. The driver starts in
	 * polling (non-event) mode, switches to event mode once the
	 * command event queue is ready, and switches back to polling
	 * mode when the device is closed.
	 */
	u8			use_events;
	u8			toggle;
};
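/*
 * Completion-path sketch (illustrative): in event mode a caller takes
 * event_sem, claims a free hns_roce_cmd_context, posts the mailbox
 * with the context's token, and sleeps on context->done until the AEQ
 * handler completes it via hns_roce_cmd_event() (declared below, using
 * the cmd fields of struct hns_roce_aeqe):
 *
 *	hns_roce_cmd_event(hr_dev, le16_to_cpu(aeqe->event.cmd.token),
 *			   aeqe->event.cmd.status,
 *			   le64_to_cpu(aeqe->event.cmd.out_param));
 */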
struct hns_roce_cmd_mailbox {
	void		*buf;
	dma_addr_t	dma;
};

struct hns_roce_dev;

struct hns_roce_rinl_sge {
	void		*addr;
	u32		len;
};

struct hns_roce_rinl_wqe {
	struct hns_roce_rinl_sge *sg_list;
	u32			 sge_cnt;
};

struct hns_roce_rinl_buf {
	struct hns_roce_rinl_wqe *wqe_list;
	u32			 wqe_cnt;
};

struct hns_roce_qp {
	struct ib_qp		ibqp;
	struct hns_roce_buf	hr_buf;
	struct hns_roce_wq	rq;
	u32			doorbell_qpn;
	__le32			sq_signal_bits;
	u32			sq_next_wqe;
	int			sq_max_wqes_per_wr;
	int			sq_spare_wqes;
	struct hns_roce_wq	sq;

	struct ib_umem		*umem;
	struct hns_roce_mtt	mtt;
	u32			buff_size;
	struct mutex		mutex;
	u8			port;
	u8			phy_port;
	u8			sl;
	u8			resp_depth;
	u8			state;
	u32			access_flags;
	u32			atomic_rd_en;
	u32			pkey_index;
	u32			qkey;
	void			(*event)(struct hns_roce_qp *qp,
					 enum hns_roce_event event_type);
	unsigned long		qpn;

	atomic_t		refcount;
	struct completion	free;

	struct hns_roce_sge	sge;
	u32			next_sge;

	struct hns_roce_rinl_buf rq_inl_buf;
};

struct hns_roce_sqp {
	struct hns_roce_qp	hr_qp;
};

struct hns_roce_ib_iboe {
	spinlock_t		lock;
	struct net_device	*netdevs[HNS_ROCE_MAX_PORTS];
	struct notifier_block	nb;
	u8			phy_port[HNS_ROCE_MAX_PORTS];
};

enum {
	HNS_ROCE_EQ_STAT_INVALID  = 0,
	HNS_ROCE_EQ_STAT_VALID    = 2,
};

struct hns_roce_ceqe {
	u32	comp;
};

struct hns_roce_aeqe {
	u32 asyn;
	union {
		struct {
			u32 qp;
			u32 rsv0;
			u32 rsv1;
		} qp_event;

		struct {
			u32 cq;
			u32 rsv0;
			u32 rsv1;
		} cq_event;

		struct {
			u32 ceqe;
			u32 rsv0;
			u32 rsv1;
		} ce_event;

		struct {
			__le64	out_param;
			__le16	token;
			u8	status;
			u8	rsv0;
		} __packed cmd;
	} event;
};

struct hns_roce_eq {
	struct hns_roce_dev		*hr_dev;
	void __iomem			*doorbell;

	int				type_flag; /* Aeq:1 ceq:0 */
	int				eqn;
	u32				entries;
	int				log_entries;
	int				eqe_size;
	int				irq;
	int				log_page_size;
	int				cons_index;
	struct hns_roce_buf_list	*buf_list;
	int				over_ignore;
	int				coalesce;
	int				arm_st;
	u64				eqe_ba;
	int				eqe_ba_pg_sz;
	int				eqe_buf_pg_sz;
	int				hop_num;
	u64				*bt_l0;	/* Base address table for L0 */
	u64				**bt_l1; /* Base address table for L1 */
	u64				**buf;
	dma_addr_t			l0_dma;
	dma_addr_t			*l1_dma;
	dma_addr_t			*buf_dma;
	u32				l0_last_num; /* L0 last chunk num */
	u32				l1_last_num; /* L1 last chunk num */
	int				eq_max_cnt;
	int				eq_period;
	int				shift;
	dma_addr_t			cur_eqe_ba;
	dma_addr_t			nxt_eqe_ba;
};

struct hns_roce_eq_table {
	struct hns_roce_eq	*eq;
	void __iomem		**eqc_base; /* only for hw v1 */
};

struct hns_roce_caps {
	u8		num_ports;
	int		gid_table_len[HNS_ROCE_MAX_PORTS];
	int		pkey_table_len[HNS_ROCE_MAX_PORTS];
	int		local_ca_ack_delay;
	int		num_uars;
	u32		phy_num_uars;
	u32		max_sq_sg;	/* 2 */
	u32		max_sq_inline;	/* 32 */
	u32		max_rq_sg;	/* 2 */
	int		num_qps;	/* 256k */
	u32		max_wqes;	/* 16k */
	u32		max_sq_desc_sz;	/* 64 */
	u32		max_rq_desc_sz;	/* 64 */
	u32		max_srq_desc_sz;
	int		max_qp_init_rdma;
	int		max_qp_dest_rdma;
	int		num_cqs;
	int		max_cqes;
	int		min_cqes;
	u32		min_wqes;
	int		reserved_cqs;
	int		num_aeq_vectors;	/* 1 */
	int		num_comp_vectors;
	int		num_other_vectors;
	int		num_mtpts;
	u32		num_mtt_segs;
	u32		num_cqe_segs;
	int		reserved_mrws;
	int		reserved_uars;
	int		num_pds;
	int		reserved_pds;
	u32		mtt_entry_sz;
	u32		cq_entry_sz;
	u32		page_size_cap;
	u32		reserved_lkey;
	int		mtpt_entry_sz;
	int		qpc_entry_sz;
	int		irrl_entry_sz;
	int		trrl_entry_sz;
	int		cqc_entry_sz;
	u32		pbl_ba_pg_sz;
	u32		pbl_buf_pg_sz;
	u32		pbl_hop_num;
	int		aeqe_depth;
	int		ceqe_depth;
	enum ib_mtu	max_mtu;
	u32		qpc_bt_num;
	u32		srqc_bt_num;
	u32		cqc_bt_num;
	u32		mpt_bt_num;
	u32		qpc_ba_pg_sz;
	u32		qpc_buf_pg_sz;
	u32		qpc_hop_num;
	u32		srqc_ba_pg_sz;
	u32		srqc_buf_pg_sz;
	u32		srqc_hop_num;
	u32		cqc_ba_pg_sz;
	u32		cqc_buf_pg_sz;
	u32		cqc_hop_num;
	u32		mpt_ba_pg_sz;
	u32		mpt_buf_pg_sz;
	u32		mpt_hop_num;
	u32		mtt_ba_pg_sz;
	u32		mtt_buf_pg_sz;
	u32		mtt_hop_num;
	u32		cqe_ba_pg_sz;
	u32		cqe_buf_pg_sz;
	u32		cqe_hop_num;
	u32		eqe_ba_pg_sz;
	u32		eqe_buf_pg_sz;
	u32		eqe_hop_num;
	u32		chunk_sz;	/* chunk size in non multihop mode */
	u64		flags;
};
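/*
 * Usage sketch: optional features are discovered by testing caps.flags
 * against the HNS_ROCE_CAP_FLAG_* bits defined above, e.g.
 *
 *	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
 *		... set up the RQ inline receive buffers ...
 */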
struct hns_roce_hw {
	int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
	int (*cmq_init)(struct hns_roce_dev *hr_dev);
	void (*cmq_exit)(struct hns_roce_dev *hr_dev);
	int (*hw_profile)(struct hns_roce_dev *hr_dev);
	int (*hw_init)(struct hns_roce_dev *hr_dev);
	void (*hw_exit)(struct hns_roce_dev *hr_dev);
	int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param,
			 u64 out_param, u32 in_modifier, u8 op_modifier,
			 u16 op, u16 token, int event);
	int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout);
	int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
		       union ib_gid *gid, const struct ib_gid_attr *attr);
	int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
	void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
			enum ib_mtu mtu);
	int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr,
			  unsigned long mtpt_idx);
	int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
				struct hns_roce_mr *mr, int flags, u32 pdn,
				int mr_access_flags, u64 iova, u64 size,
				void *mb_buf);
	void (*write_cqc)(struct hns_roce_dev *hr_dev,
			  struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
			  dma_addr_t dma_handle, int nent, u32 vector);
	int (*set_hem)(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, int obj,
		       int step_idx);
	int (*clear_hem)(struct hns_roce_dev *hr_dev,
			 struct hns_roce_hem_table *table, int obj,
			 int step_idx);
	int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
	int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
			 int attr_mask, enum ib_qp_state cur_state,
			 enum ib_qp_state new_state);
	int (*destroy_qp)(struct ib_qp *ibqp);
	int (*post_send)(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr);
	int (*post_recv)(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
			 struct ib_recv_wr **bad_recv_wr);
	int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
	int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
	int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
	int (*destroy_cq)(struct ib_cq *ibcq);
	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
	int (*init_eq)(struct hns_roce_dev *hr_dev);
	void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
};

struct hns_roce_dev {
	struct ib_device	ib_dev;
	struct platform_device	*pdev;
	struct pci_dev		*pci_dev;
	struct device		*dev;
	struct hns_roce_uar	priv_uar;
	const char		*irq_names[HNS_ROCE_MAX_IRQ_NUM];
	spinlock_t		sm_lock;
	spinlock_t		bt_cmd_lock;
	struct hns_roce_ib_iboe iboe;

	int			irq[HNS_ROCE_MAX_IRQ_NUM];
	u8 __iomem		*reg_base;
	struct hns_roce_caps	caps;
	struct radix_tree_root	qp_table_tree;

	unsigned char	dev_addr[HNS_ROCE_MAX_PORTS][MAC_ADDR_OCTET_NUM];
	u64			sys_image_guid;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_rev;
	void __iomem		*priv_addr;

	struct hns_roce_cmdq	cmd;
	struct hns_roce_bitmap	  pd_bitmap;
	struct hns_roce_uar_table uar_table;
	struct hns_roce_mr_table  mr_table;
	struct hns_roce_cq_table  cq_table;
	struct hns_roce_qp_table  qp_table;
	struct hns_roce_eq_table  eq_table;

	int			cmd_mod;
	int			loop_idc;
	u32			sdb_offset;
	u32			odb_offset;
	dma_addr_t		tptr_dma_addr;	/* only for hw v1 */
	u32			tptr_size;	/* only for hw v1 */
	const struct hns_roce_hw *hw;
	void			*priv;
};

static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
{
	return container_of(ib_dev, struct hns_roce_dev, ib_dev);
}

static inline struct hns_roce_ucontext
			*to_hr_ucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
}

static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct hns_roce_pd, ibpd);
}

static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct hns_roce_ah, ibah);
}

static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct hns_roce_mr, ibmr);
}

static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct hns_roce_qp, ibqp);
}

static inline struct hns_roce_cq *to_hr_cq(struct ib_cq *ib_cq)
{
	return container_of(ib_cq, struct hns_roce_cq, ib_cq);
}

static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}

static inline struct hns_roce_sqp *hr_to_hr_sqp(struct hns_roce_qp *hr_qp)
{
	return container_of(hr_qp, struct hns_roce_sqp, hr_qp);
}

static inline void hns_roce_write64_k(__be32 val[2], void __iomem *dest)
{
	__raw_writeq(*(u64 *) val, dest);
}
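/*
 * Doorbell sketch (illustrative): callers assemble both 32-bit words
 * first and ring with hns_roce_write64_k(), so the device observes the
 * doorbell as a single, untorn 64-bit MMIO write:
 *
 *	u32 doorbell[2];
 *
 *	doorbell[0] = ...;	(low word, per hardware layout)
 *	doorbell[1] = ...;	(high word, per hardware layout)
 *	hns_roce_write64_k((__be32 *)doorbell, qp->sq.db_reg_l);
 */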
static inline struct hns_roce_qp
	*__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
{
	return radix_tree_lookup(&hr_dev->qp_table_tree,
				 qpn & (hr_dev->caps.num_qps - 1));
}

static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
{
	u32 page_size = 1 << buf->page_shift;

	if (buf->nbufs == 1)
		return (char *)(buf->direct.buf) + offset;
	else
		return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
		       (offset & (page_size - 1));
}

int hns_roce_init_uar_table(struct hns_roce_dev *dev);
int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
void hns_roce_uar_free(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
void hns_roce_cleanup_uar_table(struct hns_roce_dev *dev);

int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
			u64 out_param);
int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);

int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
		      struct hns_roce_mtt *mtt);
void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev,
			  struct hns_roce_mtt *mtt);
int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
			   struct hns_roce_mtt *mtt, struct hns_roce_buf *buf);

int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);

void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);

int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
			  int rr);
int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
			 u32 reserved_bot, u32 reserved_top);
void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap);
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);
int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
				int align, unsigned long *obj);
void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
				unsigned long obj, int cnt,
				int rr);
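/*
 * Typical bitmap usage (illustrative), e.g. allocating a PD number
 * with the helpers declared above:
 *
 *	unsigned long pdn;
 *	int ret;
 *
 *	ret = hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, &pdn);
 *	if (ret)
 *		return ret;
 *	...
 *	hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn, BITMAP_NO_RR);
 */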
struct ib_ah *hns_roce_create_ah(struct ib_pd *pd,
				 struct rdma_ah_attr *ah_attr,
				 struct ib_udata *udata);
int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int hns_roce_destroy_ah(struct ib_ah *ah);

struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int hns_roce_dealloc_pd(struct ib_pd *pd);

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata);
int hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length,
			   u64 virt_addr, int mr_access_flags,
			   struct ib_pd *pd, struct ib_udata *udata);
int hns_roce_dereg_mr(struct ib_mr *ibmr);
int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
		       struct hns_roce_cmd_mailbox *mailbox,
		       unsigned long mpt_index);
unsigned long key_to_hw_index(u32 key);

void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
		       struct hns_roce_buf *buf);
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
		       struct hns_roce_buf *buf, u32 page_shift);

int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mtt *mtt, struct ib_umem *umem);

struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata);
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata);
void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
			  struct ib_cq *ib_cq);
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
		       struct hns_roce_cq *recv_cq);
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq);
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
			       int cnt);
__be32 send_ieth(struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);

struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
				    const struct ib_cq_init_attr *attr,
				    struct ib_ucontext *context,
				    struct ib_udata *udata);

int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq);
void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);

void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);

#endif /* _HNS_ROCE_DEVICE_H */