// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/semaphore.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/err.h>

#include "hinic_hw_dev.h"
#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_cmdq.h"
#include "hinic_hw_qp_ctxt.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_io.h"

#define CI_Q_ADDR_SIZE			sizeof(u32)

#define CI_ADDR(base_addr, q_id)	((base_addr) + \
					 (q_id) * CI_Q_ADDR_SIZE)

#define CI_TABLE_SIZE(num_qps)		((num_qps) * CI_Q_ADDR_SIZE)

#define DB_IDX(db, db_base)		\
	(((unsigned long)(db) - (unsigned long)(db_base)) / HINIC_DB_PAGE_SIZE)

/* HW encodes a WQ page size as log2 of its size in 4 KB units */
#define HINIC_PAGE_SIZE_HW(pg_size)	((u8)ilog2((u32)((pg_size) >> 12)))

enum io_cmd {
	IO_CMD_MODIFY_QUEUE_CTXT = 0,
	IO_CMD_CLEAN_QUEUE_CTXT,
};
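
/*
 * The function's doorbell BAR space is carved into HINIC_DB_MAX_AREAS
 * pages of HINIC_DB_PAGE_SIZE bytes each. Free page indices are kept in
 * the db_idx circular buffer: get_db_area() pops an index at alloc_pos
 * and return_db_area() pushes it back at return_pos, with idx_lock
 * serializing both paths.
 */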
static void init_db_area_idx(struct hinic_free_db_area *free_db_area)
{
	int i;

	for (i = 0; i < HINIC_DB_MAX_AREAS; i++)
		free_db_area->db_idx[i] = i;

	free_db_area->alloc_pos = 0;
	free_db_area->return_pos = HINIC_DB_MAX_AREAS;

	free_db_area->num_free = HINIC_DB_MAX_AREAS;

	sema_init(&free_db_area->idx_lock, 1);
}

static void __iomem *get_db_area(struct hinic_func_to_io *func_to_io)
{
	struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
	int pos, idx;

	down(&free_db_area->idx_lock);

	free_db_area->num_free--;

	if (free_db_area->num_free < 0) {
		free_db_area->num_free++;
		up(&free_db_area->idx_lock);
		return ERR_PTR(-ENOMEM);
	}

	pos = free_db_area->alloc_pos++;
	pos &= HINIC_DB_MAX_AREAS - 1;

	idx = free_db_area->db_idx[pos];

	free_db_area->db_idx[pos] = -1;

	up(&free_db_area->idx_lock);

	return func_to_io->db_base + idx * HINIC_DB_PAGE_SIZE;
}

static void return_db_area(struct hinic_func_to_io *func_to_io,
			   void __iomem *db_base)
{
	struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
	int pos, idx = DB_IDX(db_base, func_to_io->db_base);

	down(&free_db_area->idx_lock);

	pos = free_db_area->return_pos++;
	pos &= HINIC_DB_MAX_AREAS - 1;

	free_db_area->db_idx[pos] = idx;

	free_db_area->num_free++;

	up(&free_db_area->idx_lock);
}
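
/**
 * write_sq_ctxts - write the SQ ctxts to HW
 * @func_to_io: func to io channel that holds the IO components
 * @base_qpn: first qp number
 * @num_sqs: number of SQs to write
 *
 * Return 0 - Success, negative - Failure
 **/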
static int write_sq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
			  u16 num_sqs)
{
	struct hinic_hwif *hwif = func_to_io->hwif;
	struct hinic_sq_ctxt_block *sq_ctxt_block;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_cmdq_buf cmdq_buf;
	struct hinic_sq_ctxt *sq_ctxt;
	struct hinic_qp *qp;
	u64 out_param;
	int err, i;

	err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
		return err;
	}

	sq_ctxt_block = cmdq_buf.buf;
	sq_ctxt = sq_ctxt_block->sq_ctxt;

	hinic_qp_prepare_header(&sq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_SQ,
				num_sqs, func_to_io->max_qps);
	for (i = 0; i < num_sqs; i++) {
		qp = &func_to_io->qps[i];

		hinic_sq_prepare_ctxt(&sq_ctxt[i], &qp->sq,
				      base_qpn + qp->q_id);
	}

	cmdq_buf.size = HINIC_SQ_CTXT_SIZE(num_sqs);

	err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
				     IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
				     &out_param);
	if (err || out_param) {
		dev_err(&pdev->dev, "Failed to set SQ ctxts\n");
		err = -EFAULT;
	}

	hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
	return err;
}
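
/**
 * write_rq_ctxts - write the RQ ctxts to HW
 * @func_to_io: func to io channel that holds the IO components
 * @base_qpn: first qp number
 * @num_rqs: number of RQs to write
 *
 * Return 0 - Success, negative - Failure
 **/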
static int write_rq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
			  u16 num_rqs)
{
	struct hinic_hwif *hwif = func_to_io->hwif;
	struct hinic_rq_ctxt_block *rq_ctxt_block;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_cmdq_buf cmdq_buf;
	struct hinic_rq_ctxt *rq_ctxt;
	struct hinic_qp *qp;
	u64 out_param;
	int err, i;

	err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
		return err;
	}

	rq_ctxt_block = cmdq_buf.buf;
	rq_ctxt = rq_ctxt_block->rq_ctxt;

	hinic_qp_prepare_header(&rq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_RQ,
				num_rqs, func_to_io->max_qps);
	for (i = 0; i < num_rqs; i++) {
		qp = &func_to_io->qps[i];

		hinic_rq_prepare_ctxt(&rq_ctxt[i], &qp->rq,
				      base_qpn + qp->q_id);
	}

	cmdq_buf.size = HINIC_RQ_CTXT_SIZE(num_rqs);

	err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
				     IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
				     &out_param);
	if (err || out_param) {
		dev_err(&pdev->dev, "Failed to set RQ ctxts\n");
		err = -EFAULT;
	}

	hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
	return err;
}

/**
 * write_qp_ctxts - write the qp ctxt to HW
 * @func_to_io: func to io channel that holds the IO components
 * @base_qpn: first qp number
 * @num_qps: number of qps to write
 *
 * Return 0 - Success, negative - Failure
 **/
static int write_qp_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
			  u16 num_qps)
{
	int err;

	err = write_sq_ctxts(func_to_io, base_qpn, num_qps);
	if (err)
		return err;

	return write_rq_ctxts(func_to_io, base_qpn, num_qps);
}

static int hinic_clean_queue_offload_ctxt(struct hinic_func_to_io *func_to_io,
					  enum hinic_qp_ctxt_type ctxt_type)
{
	struct hinic_hwif *hwif = func_to_io->hwif;
	struct hinic_clean_queue_ctxt *ctxt_block;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_cmdq_buf cmdq_buf;
	u64 out_param = 0;
	int err;

	err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
		return err;
	}

	ctxt_block = cmdq_buf.buf;
	ctxt_block->cmdq_hdr.num_queues = func_to_io->max_qps;
	ctxt_block->cmdq_hdr.queue_type = ctxt_type;
	ctxt_block->cmdq_hdr.addr_offset = 0;

	/* TSO/LRO ctxt size: 0x0:0B; 0x1:160B; 0x2:200B; 0x3:240B */
	ctxt_block->ctxt_size = 0x3;

	hinic_cpu_to_be32(ctxt_block, sizeof(*ctxt_block));

	cmdq_buf.size = sizeof(*ctxt_block);

	err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
				     IO_CMD_CLEAN_QUEUE_CTXT,
				     &cmdq_buf, &out_param);
	if (err || out_param) {
		dev_err(&pdev->dev, "Failed to clean offload ctxts, err: %d, out_param: 0x%llx\n",
			err, out_param);

		err = -EFAULT;
	}

	hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);

	return err;
}
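
/**
 * hinic_clean_qp_offload_ctxt - clean the LRO/TSO offload ctxts of the QPs
 * @func_to_io: func to io channel that holds the IO components
 *
 * Return 0 - Success, negative - Failure
 **/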
static int hinic_clean_qp_offload_ctxt(struct hinic_func_to_io *func_to_io)
{
	int err;

	/* clean LRO/TSO context space for both the SQs and the RQs */
	err = hinic_clean_queue_offload_ctxt(func_to_io,
					     HINIC_QP_CTXT_TYPE_SQ);
	if (err)
		return err;

	return hinic_clean_queue_offload_ctxt(func_to_io,
					      HINIC_QP_CTXT_TYPE_RQ);
}

/**
 * init_qp - Initialize a Queue Pair
 * @func_to_io: func to io channel that holds the IO components
 * @qp: pointer to the qp to initialize
 * @q_id: the id of the qp
 * @sq_msix_entry: msix entry for sq
 * @rq_msix_entry: msix entry for rq
 *
 * Return 0 - Success, negative - Failure
 **/
static int init_qp(struct hinic_func_to_io *func_to_io,
		   struct hinic_qp *qp, int q_id,
		   struct msix_entry *sq_msix_entry,
		   struct msix_entry *rq_msix_entry)
{
	struct hinic_hwif *hwif = func_to_io->hwif;
	struct pci_dev *pdev = hwif->pdev;
	void __iomem *db_base;
	int err;

	qp->q_id = q_id;

	err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id],
				HINIC_SQ_WQEBB_SIZE, HINIC_SQ_PAGE_SIZE,
				func_to_io->sq_depth, HINIC_SQ_WQE_MAX_SIZE);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate WQ for SQ\n");
		return err;
	}

	err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id],
				HINIC_RQ_WQEBB_SIZE, HINIC_RQ_PAGE_SIZE,
				func_to_io->rq_depth, HINIC_RQ_WQE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate WQ for RQ\n");
		goto err_rq_alloc;
	}

	db_base = get_db_area(func_to_io);
	if (IS_ERR(db_base)) {
		dev_err(&pdev->dev, "Failed to get DB area for SQ\n");
		err = PTR_ERR(db_base);
		goto err_get_db;
	}

	func_to_io->sq_db[q_id] = db_base;

	qp->sq.qid = q_id;
	err = hinic_init_sq(&qp->sq, hwif, &func_to_io->sq_wq[q_id],
			    sq_msix_entry,
			    CI_ADDR(func_to_io->ci_addr_base, q_id),
			    CI_ADDR(func_to_io->ci_dma_base, q_id), db_base);
	if (err) {
		dev_err(&pdev->dev, "Failed to init SQ\n");
		goto err_sq_init;
	}

	qp->rq.qid = q_id;
	err = hinic_init_rq(&qp->rq, hwif, &func_to_io->rq_wq[q_id],
			    rq_msix_entry);
	if (err) {
		dev_err(&pdev->dev, "Failed to init RQ\n");
		goto err_rq_init;
	}

	return 0;

err_rq_init:
	hinic_clean_sq(&qp->sq);

err_sq_init:
	return_db_area(func_to_io, db_base);

err_get_db:
	hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);

err_rq_alloc:
	hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
	return err;
}

/**
 * destroy_qp - Clean the resources of a Queue Pair
 * @func_to_io: func to io channel that holds the IO components
 * @qp: pointer to the qp to clean
 **/
static void destroy_qp(struct hinic_func_to_io *func_to_io,
		       struct hinic_qp *qp)
{
	int q_id = qp->q_id;

	hinic_clean_rq(&qp->rq);
	hinic_clean_sq(&qp->sq);

	return_db_area(func_to_io, func_to_io->sq_db[q_id]);

	hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);
	hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
}

/**
 * hinic_io_create_qps - Create Queue Pairs
 * @func_to_io: func to io channel that holds the IO components
 * @base_qpn: base qp number
 * @num_qps: number of queue pairs to create
 * @sq_msix_entries: msix entries for sq
 * @rq_msix_entries: msix entries for rq
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
			u16 base_qpn, int num_qps,
			struct msix_entry *sq_msix_entries,
			struct msix_entry *rq_msix_entries)
{
	struct hinic_hwif *hwif = func_to_io->hwif;
	struct pci_dev *pdev = hwif->pdev;
	size_t qps_size, wq_size, db_size;
	void *ci_addr_base;
	int i, j, err;

	qps_size = num_qps * sizeof(*func_to_io->qps);
	func_to_io->qps = devm_kzalloc(&pdev->dev, qps_size, GFP_KERNEL);
	if (!func_to_io->qps)
		return -ENOMEM;

	wq_size = num_qps * sizeof(*func_to_io->sq_wq);
	func_to_io->sq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
	if (!func_to_io->sq_wq) {
		err = -ENOMEM;
		goto err_sq_wq;
	}

	wq_size = num_qps * sizeof(*func_to_io->rq_wq);
	func_to_io->rq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
	if (!func_to_io->rq_wq) {
		err = -ENOMEM;
		goto err_rq_wq;
	}

	db_size = num_qps * sizeof(*func_to_io->sq_db);
	func_to_io->sq_db = devm_kzalloc(&pdev->dev, db_size, GFP_KERNEL);
	if (!func_to_io->sq_db) {
		err = -ENOMEM;
		goto err_sq_db;
	}

	ci_addr_base = dma_alloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
					  &func_to_io->ci_dma_base,
					  GFP_KERNEL);
	if (!ci_addr_base) {
		dev_err(&pdev->dev, "Failed to allocate CI area\n");
		err = -ENOMEM;
		goto err_ci_base;
	}

	func_to_io->ci_addr_base = ci_addr_base;

	for (i = 0; i < num_qps; i++) {
		err = init_qp(func_to_io, &func_to_io->qps[i], i,
			      &sq_msix_entries[i], &rq_msix_entries[i]);
		if (err) {
			dev_err(&pdev->dev, "Failed to create QP %d\n", i);
			goto err_init_qp;
		}
	}

	err = write_qp_ctxts(func_to_io, base_qpn, num_qps);
	if (err) {
		dev_err(&pdev->dev, "Failed to init QP ctxts\n");
		goto err_write_qp_ctxts;
	}

	err = hinic_clean_qp_offload_ctxt(func_to_io);
	if (err) {
		dev_err(&pdev->dev, "Failed to clean QP contexts space\n");
		goto err_write_qp_ctxts;
	}

	return 0;

err_write_qp_ctxts:
err_init_qp:
	for (j = 0; j < i; j++)
		destroy_qp(func_to_io, &func_to_io->qps[j]);

	dma_free_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
			  func_to_io->ci_addr_base, func_to_io->ci_dma_base);

err_ci_base:
	devm_kfree(&pdev->dev, func_to_io->sq_db);

err_sq_db:
	devm_kfree(&pdev->dev, func_to_io->rq_wq);

err_rq_wq:
	devm_kfree(&pdev->dev, func_to_io->sq_wq);

err_sq_wq:
	devm_kfree(&pdev->dev, func_to_io->qps);
	return err;
}

/**
 * hinic_io_destroy_qps - Destroy the IO Queue Pairs
 * @func_to_io: func to io channel that holds the IO components
 * @num_qps: number of queue pairs to destroy
 **/
void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, int num_qps)
{
	struct hinic_hwif *hwif = func_to_io->hwif;
	struct pci_dev *pdev = hwif->pdev;
	size_t ci_table_size;
	int i;

	ci_table_size = CI_TABLE_SIZE(num_qps);

	for (i = 0; i < num_qps; i++)
		destroy_qp(func_to_io, &func_to_io->qps[i]);

	dma_free_coherent(&pdev->dev, ci_table_size, func_to_io->ci_addr_base,
			  func_to_io->ci_dma_base);

	devm_kfree(&pdev->dev, func_to_io->sq_db);

	devm_kfree(&pdev->dev, func_to_io->rq_wq);
	devm_kfree(&pdev->dev, func_to_io->sq_wq);

	devm_kfree(&pdev->dev, func_to_io->qps);
}
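
/**
 * hinic_set_wq_page_size - set the WQ page size in HW
 * @hwdev: the NIC HW device
 * @func_idx: the function to set the page size for
 * @page_size: the WQ page size, in bytes
 *
 * Return 0 - Success, negative - Failure
 **/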
int hinic_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
			   u32 page_size)
{
	struct hinic_wq_page_size page_size_info = {0};
	u16 out_size = sizeof(page_size_info);
	struct hinic_pfhwdev *pfhwdev;
	int err;

	pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);

	page_size_info.func_idx = func_idx;
	page_size_info.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
	page_size_info.page_size = HINIC_PAGE_SIZE_HW(page_size);

	err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
				HINIC_COMM_CMD_PAGESIZE_SET, &page_size_info,
				sizeof(page_size_info), &page_size_info,
				&out_size, HINIC_MGMT_MSG_SYNC);
	if (err || !out_size || page_size_info.status) {
		dev_err(&hwdev->hwif->pdev->dev, "Failed to set wq page size, err: %d, status: 0x%x, out_size: 0x%x\n",
			err, page_size_info.status, out_size);
		return -EFAULT;
	}

	return 0;
}

/**
 * hinic_io_init - Initialize the IO components
 * @func_to_io: func to io channel that holds the IO components
 * @hwif: HW interface for accessing IO
 * @max_qps: maximum QPs in HW
 * @num_ceqs: number of completion event queues
 * @ceq_msix_entries: msix entries for ceqs
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_io_init(struct hinic_func_to_io *func_to_io,
		  struct hinic_hwif *hwif, u16 max_qps, int num_ceqs,
		  struct msix_entry *ceq_msix_entries)
{
	struct pci_dev *pdev = hwif->pdev;
	enum hinic_cmdq_type cmdq, type;
	void __iomem *db_area;
	int err;

	func_to_io->hwif = hwif;
	func_to_io->qps = NULL;
	func_to_io->max_qps = max_qps;
	func_to_io->ceqs.hwdev = func_to_io->hwdev;

	err = hinic_ceqs_init(&func_to_io->ceqs, hwif, num_ceqs,
			      HINIC_DEFAULT_CEQ_LEN, HINIC_EQ_PAGE_SIZE,
			      ceq_msix_entries);
	if (err) {
		dev_err(&pdev->dev, "Failed to init CEQs\n");
		return err;
	}

	/* one SQ WQ and one RQ WQ per QP */
	err = hinic_wqs_alloc(&func_to_io->wqs, 2 * max_qps, hwif);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate WQS for IO\n");
		goto err_wqs_alloc;
	}

	func_to_io->db_base = pci_ioremap_bar(pdev, HINIC_PCI_DB_BAR);
	if (!func_to_io->db_base) {
		dev_err(&pdev->dev, "Failed to remap IO DB area\n");
		err = -ENOMEM;
		goto err_db_ioremap;
	}

	init_db_area_idx(&func_to_io->free_db_area);

	for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) {
		db_area = get_db_area(func_to_io);
		if (IS_ERR(db_area)) {
			dev_err(&pdev->dev, "Failed to get cmdq db area\n");
			err = PTR_ERR(db_area);
			goto err_db_area;
		}

		func_to_io->cmdq_db_area[cmdq] = db_area;
	}

	err = hinic_set_wq_page_size(func_to_io->hwdev,
				     HINIC_HWIF_FUNC_IDX(hwif),
				     HINIC_DEFAULT_WQ_PAGE_SIZE);
	if (err) {
		dev_err(&func_to_io->hwif->pdev->dev, "Failed to set wq page size\n");
		goto err_set_wq_page_size;
	}

	err = hinic_init_cmdqs(&func_to_io->cmdqs, hwif,
			       func_to_io->cmdq_db_area);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize cmdqs\n");
		goto err_init_cmdqs;
	}

	return 0;

err_init_cmdqs:
	if (!HINIC_IS_VF(func_to_io->hwif))
		hinic_set_wq_page_size(func_to_io->hwdev,
				       HINIC_HWIF_FUNC_IDX(hwif),
				       HINIC_HW_WQ_PAGE_SIZE);

err_set_wq_page_size:
err_db_area:
	for (type = HINIC_CMDQ_SYNC; type < cmdq; type++)
		return_db_area(func_to_io, func_to_io->cmdq_db_area[type]);

	iounmap(func_to_io->db_base);

err_db_ioremap:
	hinic_wqs_free(&func_to_io->wqs);

err_wqs_alloc:
	hinic_ceqs_free(&func_to_io->ceqs);
	return err;
}

/**
 * hinic_io_free - Free the IO components
 * @func_to_io: func to io channel that holds the IO components
 **/
void hinic_io_free(struct hinic_func_to_io *func_to_io)
{
	enum hinic_cmdq_type cmdq;

	hinic_free_cmdqs(&func_to_io->cmdqs);

	if (!HINIC_IS_VF(func_to_io->hwif))
		hinic_set_wq_page_size(func_to_io->hwdev,
				       HINIC_HWIF_FUNC_IDX(func_to_io->hwif),
				       HINIC_HW_WQ_PAGE_SIZE);

	for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++)
		return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]);

	iounmap(func_to_io->db_base);
	hinic_wqs_free(&func_to_io->wqs);
	hinic_ceqs_free(&func_to_io->ceqs);
}