// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/semaphore.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/err.h>

#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_cmdq.h"
#include "hinic_hw_qp_ctxt.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_io.h"

#define CI_Q_ADDR_SIZE			sizeof(u32)

#define CI_ADDR(base_addr, q_id)	((base_addr) + \
					 (q_id) * CI_Q_ADDR_SIZE)

#define CI_TABLE_SIZE(num_qps)		((num_qps) * CI_Q_ADDR_SIZE)

#define DB_IDX(db, db_base)		\
	(((unsigned long)(db) - (unsigned long)(db_base)) / HINIC_DB_PAGE_SIZE)

enum io_cmd {
	IO_CMD_MODIFY_QUEUE_CTXT = 0,
};

/**
 * init_db_area_idx - Initialize the doorbell area free-index pool
 * @free_db_area: the free doorbell area pool to initialize
 **/
static void init_db_area_idx(struct hinic_free_db_area *free_db_area)
{
	int i;

	for (i = 0; i < HINIC_DB_MAX_AREAS; i++)
		free_db_area->db_idx[i] = i;

	free_db_area->alloc_pos = 0;
	free_db_area->return_pos = HINIC_DB_MAX_AREAS;

	free_db_area->num_free = HINIC_DB_MAX_AREAS;

	sema_init(&free_db_area->idx_lock, 1);
}

/**
 * get_db_area - Get a free doorbell area
 * @func_to_io: func to io channel that holds the IO components
 *
 * Return doorbell area address - Success, ERR_PTR - Failure
 **/
static void __iomem *get_db_area(struct hinic_func_to_io *func_to_io)
{
	struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
	int pos, idx;

	down(&free_db_area->idx_lock);

	free_db_area->num_free--;

	if (free_db_area->num_free < 0) {
		free_db_area->num_free++;
		up(&free_db_area->idx_lock);
		return ERR_PTR(-ENOMEM);
	}

	pos = free_db_area->alloc_pos++;
	pos &= HINIC_DB_MAX_AREAS - 1;

	idx = free_db_area->db_idx[pos];

	free_db_area->db_idx[pos] = -1;

	up(&free_db_area->idx_lock);

	return func_to_io->db_base + idx * HINIC_DB_PAGE_SIZE;
}

/**
 * return_db_area - Return a doorbell area to the free pool
 * @func_to_io: func to io channel that holds the IO components
 * @db_base: the doorbell area to return
 **/
static void return_db_area(struct hinic_func_to_io *func_to_io,
			   void __iomem *db_base)
{
	struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
	int pos, idx = DB_IDX(db_base, func_to_io->db_base);

	down(&free_db_area->idx_lock);

	pos = free_db_area->return_pos++;
	pos &= HINIC_DB_MAX_AREAS - 1;

	free_db_area->db_idx[pos] = idx;

	free_db_area->num_free++;

	up(&free_db_area->idx_lock);
}

/**
 * write_sq_ctxts - write the SQ ctxts to HW
 * @func_to_io: func to io channel that holds the IO components
 * @base_qpn: first qp number
 * @num_sqs: number of SQs to write
 *
 * Return 0 - Success, negative - Failure
 **/
static int write_sq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
			  u16 num_sqs)
{
	struct hinic_hwif *hwif = func_to_io->hwif;
	struct hinic_sq_ctxt_block *sq_ctxt_block;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_cmdq_buf cmdq_buf;
	struct hinic_sq_ctxt *sq_ctxt;
	struct hinic_qp *qp;
	u64 out_param;
	int err, i;

	err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
		return err;
	}

	sq_ctxt_block = cmdq_buf.buf;
	sq_ctxt = sq_ctxt_block->sq_ctxt;

	hinic_qp_prepare_header(&sq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_SQ,
				num_sqs, func_to_io->max_qps);
	for (i = 0; i < num_sqs; i++) {
		qp = &func_to_io->qps[i];

		hinic_sq_prepare_ctxt(&sq_ctxt[i], &qp->sq,
				      base_qpn + qp->q_id);
	}

	cmdq_buf.size = HINIC_SQ_CTXT_SIZE(num_sqs);

	err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
				     IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
				     &out_param);
	if ((err) || (out_param != 0)) {
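		/* The cmdq call can succeed while the device still returns a
		 * non-zero status in out_param, so both values are treated as
		 * a failure to set the SQ ctxts.
		 */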
		dev_err(&pdev->dev, "Failed to set SQ ctxts\n");
		err = -EFAULT;
	}

	hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
	return err;
}

/**
 * write_rq_ctxts - write the RQ ctxts to HW
 * @func_to_io: func to io channel that holds the IO components
 * @base_qpn: first qp number
 * @num_rqs: number of RQs to write
 *
 * Return 0 - Success, negative - Failure
 **/
static int write_rq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
			  u16 num_rqs)
{
	struct hinic_hwif *hwif = func_to_io->hwif;
	struct hinic_rq_ctxt_block *rq_ctxt_block;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_cmdq_buf cmdq_buf;
	struct hinic_rq_ctxt *rq_ctxt;
	struct hinic_qp *qp;
	u64 out_param;
	int err, i;

	err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
		return err;
	}

	rq_ctxt_block = cmdq_buf.buf;
	rq_ctxt = rq_ctxt_block->rq_ctxt;

	hinic_qp_prepare_header(&rq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_RQ,
				num_rqs, func_to_io->max_qps);
	for (i = 0; i < num_rqs; i++) {
		qp = &func_to_io->qps[i];

		hinic_rq_prepare_ctxt(&rq_ctxt[i], &qp->rq,
				      base_qpn + qp->q_id);
	}

	cmdq_buf.size = HINIC_RQ_CTXT_SIZE(num_rqs);

	err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
				     IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
				     &out_param);
	if ((err) || (out_param != 0)) {
		dev_err(&pdev->dev, "Failed to set RQ ctxts\n");
		err = -EFAULT;
	}

	hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
	return err;
}

/**
 * write_qp_ctxts - write the qp ctxt to HW
 * @func_to_io: func to io channel that holds the IO components
 * @base_qpn: first qp number
 * @num_qps: number of qps to write
 *
 * Return 0 - Success, non-zero - Failure
 **/
static int write_qp_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
			  u16 num_qps)
{
	return (write_sq_ctxts(func_to_io, base_qpn, num_qps) ||
		write_rq_ctxts(func_to_io, base_qpn, num_qps));
}

/**
 * init_qp - Initialize a Queue Pair
 * @func_to_io: func to io channel that holds the IO components
 * @qp: pointer to the qp to initialize
 * @q_id: the id of the qp
 * @sq_msix_entry: msix entry for sq
 * @rq_msix_entry: msix entry for rq
 *
 * Return 0 - Success, negative - Failure
 **/
static int init_qp(struct hinic_func_to_io *func_to_io,
		   struct hinic_qp *qp, int q_id,
		   struct msix_entry *sq_msix_entry,
		   struct msix_entry *rq_msix_entry)
{
	struct hinic_hwif *hwif = func_to_io->hwif;
	struct pci_dev *pdev = hwif->pdev;
	void __iomem *db_base;
	int err;

	qp->q_id = q_id;

	err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id],
				HINIC_SQ_WQEBB_SIZE, HINIC_SQ_PAGE_SIZE,
				HINIC_SQ_DEPTH, HINIC_SQ_WQE_MAX_SIZE);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate WQ for SQ\n");
		return err;
	}

	err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id],
				HINIC_RQ_WQEBB_SIZE, HINIC_RQ_PAGE_SIZE,
				HINIC_RQ_DEPTH, HINIC_RQ_WQE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate WQ for RQ\n");
		goto err_rq_alloc;
	}

	db_base = get_db_area(func_to_io);
	if (IS_ERR(db_base)) {
		dev_err(&pdev->dev, "Failed to get DB area for SQ\n");
		err = PTR_ERR(db_base);
		goto err_get_db;
	}

	func_to_io->sq_db[q_id] = db_base;

	err = hinic_init_sq(&qp->sq, hwif, &func_to_io->sq_wq[q_id],
			    sq_msix_entry,
			    CI_ADDR(func_to_io->ci_addr_base, q_id),
			    CI_ADDR(func_to_io->ci_dma_base, q_id), db_base);
	if (err) {
		dev_err(&pdev->dev, "Failed to init SQ\n");
		goto err_sq_init;
	}

	err = hinic_init_rq(&qp->rq, hwif, &func_to_io->rq_wq[q_id],
			    rq_msix_entry);
	if (err) {
		dev_err(&pdev->dev, "Failed to init RQ\n");
		goto err_rq_init;
	}

	return 0;

err_rq_init:
	hinic_clean_sq(&qp->sq);

err_sq_init:
	return_db_area(func_to_io, db_base);

err_get_db:
	hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);

err_rq_alloc:
	hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
	return err;
}

/**
 * destroy_qp - Clean the resources of a Queue Pair
 * @func_to_io: func to io channel that holds the IO components
 * @qp: pointer to the qp to clean
 **/
static void destroy_qp(struct hinic_func_to_io *func_to_io,
		       struct hinic_qp *qp)
{
	int q_id = qp->q_id;

	hinic_clean_rq(&qp->rq);
	hinic_clean_sq(&qp->sq);

	return_db_area(func_to_io, func_to_io->sq_db[q_id]);

	hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);
	hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
}

/**
 * hinic_io_create_qps - Create Queue Pairs
 * @func_to_io: func to io channel that holds the IO components
 * @base_qpn: base qp number
 * @num_qps: number of queue pairs to create
 * @sq_msix_entries: msix entries for sq
 * @rq_msix_entries: msix entries for rq
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
			u16 base_qpn, int num_qps,
			struct msix_entry *sq_msix_entries,
			struct msix_entry *rq_msix_entries)
{
	struct hinic_hwif *hwif = func_to_io->hwif;
	struct pci_dev *pdev = hwif->pdev;
	size_t qps_size, wq_size, db_size;
	void *ci_addr_base;
	int i, j, err;

	qps_size = num_qps * sizeof(*func_to_io->qps);
	func_to_io->qps = devm_kzalloc(&pdev->dev, qps_size, GFP_KERNEL);
	if (!func_to_io->qps)
		return -ENOMEM;

	wq_size = num_qps * sizeof(*func_to_io->sq_wq);
	func_to_io->sq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
	if (!func_to_io->sq_wq) {
		err = -ENOMEM;
		goto err_sq_wq;
	}

	wq_size = num_qps * sizeof(*func_to_io->rq_wq);
	func_to_io->rq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
	if (!func_to_io->rq_wq) {
		err = -ENOMEM;
		goto err_rq_wq;
	}

	db_size = num_qps * sizeof(*func_to_io->sq_db);
	func_to_io->sq_db = devm_kzalloc(&pdev->dev, db_size, GFP_KERNEL);
	if (!func_to_io->sq_db) {
		err = -ENOMEM;
		goto err_sq_db;
	}

	ci_addr_base = dma_alloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
					  &func_to_io->ci_dma_base,
					  GFP_KERNEL);
	if (!ci_addr_base) {
		dev_err(&pdev->dev, "Failed to allocate CI area\n");
		err = -ENOMEM;
		goto err_ci_base;
	}

	func_to_io->ci_addr_base = ci_addr_base;

	for (i = 0; i < num_qps; i++) {
		err = init_qp(func_to_io, &func_to_io->qps[i], i,
			      &sq_msix_entries[i], &rq_msix_entries[i]);
		if (err) {
			dev_err(&pdev->dev, "Failed to create QP %d\n", i);
			goto err_init_qp;
		}
	}

	err = write_qp_ctxts(func_to_io, base_qpn, num_qps);
	if (err) {
		dev_err(&pdev->dev, "Failed to init QP ctxts\n");
		goto err_write_qp_ctxts;
	}

	return 0;

err_write_qp_ctxts:
err_init_qp:
	for (j = 0; j < i; j++)
		destroy_qp(func_to_io, &func_to_io->qps[j]);

	dma_free_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
			  func_to_io->ci_addr_base, func_to_io->ci_dma_base);

err_ci_base:
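	/* The per-QP arrays are devm-allocated in this function, so they are
	 * freed explicitly on the unwind path (and in hinic_io_destroy_qps)
	 * rather than being left to device teardown.
	 */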
	devm_kfree(&pdev->dev, func_to_io->sq_db);

err_sq_db:
	devm_kfree(&pdev->dev, func_to_io->rq_wq);

err_rq_wq:
	devm_kfree(&pdev->dev, func_to_io->sq_wq);

err_sq_wq:
	devm_kfree(&pdev->dev, func_to_io->qps);
	return err;
}

/**
 * hinic_io_destroy_qps - Destroy the IO Queue Pairs
 * @func_to_io: func to io channel that holds the IO components
 * @num_qps: number of queue pairs to destroy
 **/
void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, int num_qps)
{
	struct hinic_hwif *hwif = func_to_io->hwif;
	struct pci_dev *pdev = hwif->pdev;
	size_t ci_table_size;
	int i;

	ci_table_size = CI_TABLE_SIZE(num_qps);

	for (i = 0; i < num_qps; i++)
		destroy_qp(func_to_io, &func_to_io->qps[i]);

	dma_free_coherent(&pdev->dev, ci_table_size, func_to_io->ci_addr_base,
			  func_to_io->ci_dma_base);

	devm_kfree(&pdev->dev, func_to_io->sq_db);

	devm_kfree(&pdev->dev, func_to_io->rq_wq);
	devm_kfree(&pdev->dev, func_to_io->sq_wq);

	devm_kfree(&pdev->dev, func_to_io->qps);
}

/**
 * hinic_io_init - Initialize the IO components
 * @func_to_io: func to io channel that holds the IO components
 * @hwif: HW interface for accessing IO
 * @max_qps: maximum QPs in HW
 * @num_ceqs: number of completion event queues
 * @ceq_msix_entries: msix entries for ceqs
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_io_init(struct hinic_func_to_io *func_to_io,
		  struct hinic_hwif *hwif, u16 max_qps, int num_ceqs,
		  struct msix_entry *ceq_msix_entries)
{
	struct pci_dev *pdev = hwif->pdev;
	enum hinic_cmdq_type cmdq, type;
	void __iomem *db_area;
	int err;

	func_to_io->hwif = hwif;
	func_to_io->qps = NULL;
	func_to_io->max_qps = max_qps;

	err = hinic_ceqs_init(&func_to_io->ceqs, hwif, num_ceqs,
			      HINIC_DEFAULT_CEQ_LEN, HINIC_EQ_PAGE_SIZE,
			      ceq_msix_entries);
	if (err) {
		dev_err(&pdev->dev, "Failed to init CEQs\n");
		return err;
	}

	err = hinic_wqs_alloc(&func_to_io->wqs, 2 * max_qps, hwif);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate WQS for IO\n");
		goto err_wqs_alloc;
	}

	func_to_io->db_base = pci_ioremap_bar(pdev, HINIC_PCI_DB_BAR);
	if (!func_to_io->db_base) {
		dev_err(&pdev->dev, "Failed to remap IO DB area\n");
		err = -ENOMEM;
		goto err_db_ioremap;
	}

	init_db_area_idx(&func_to_io->free_db_area);

	for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) {
		db_area = get_db_area(func_to_io);
		if (IS_ERR(db_area)) {
			dev_err(&pdev->dev, "Failed to get cmdq db area\n");
			err = PTR_ERR(db_area);
			goto err_db_area;
		}

		func_to_io->cmdq_db_area[cmdq] = db_area;
	}

	err = hinic_init_cmdqs(&func_to_io->cmdqs, hwif,
			       func_to_io->cmdq_db_area);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize cmdqs\n");
		goto err_init_cmdqs;
	}

	return 0;

err_init_cmdqs:
err_db_area:
	for (type = HINIC_CMDQ_SYNC; type < cmdq; type++)
		return_db_area(func_to_io, func_to_io->cmdq_db_area[type]);

	iounmap(func_to_io->db_base);

err_db_ioremap:
	hinic_wqs_free(&func_to_io->wqs);

err_wqs_alloc:
	hinic_ceqs_free(&func_to_io->ceqs);
	return err;
}

/**
 * hinic_io_free - Free the IO components
 * @func_to_io: func to io channel that holds the IO components
 **/
void hinic_io_free(struct hinic_func_to_io *func_to_io)
{
	enum hinic_cmdq_type cmdq;

	hinic_free_cmdqs(&func_to_io->cmdqs);

	for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++)
		return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]);

	iounmap(func_to_io->db_base);
	hinic_wqs_free(&func_to_io->wqs);
	hinic_ceqs_free(&func_to_io->ceqs);
}
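/*
 * Typical call order, as an illustrative sketch only (hwif, the MSI-X entry
 * arrays, base_qpn and num_qps are assumed to be supplied by the caller and
 * are not defined here):
 *
 *	err = hinic_io_init(&func_to_io, hwif, max_qps, num_ceqs,
 *			    ceq_msix_entries);
 *	if (!err)
 *		err = hinic_io_create_qps(&func_to_io, base_qpn, num_qps,
 *					  sq_msix_entries, rq_msix_entries);
 *
 *	...
 *
 *	hinic_io_destroy_qps(&func_to_io, num_qps);
 *	hinic_io_free(&func_to_io);
 */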