/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2014 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"

#define LPFC_MBUF_POOL_SIZE	64	/* max elements in MBUF safety pool */
#define LPFC_MEM_POOL_SIZE	64	/* max elem in non-DMA safety pool */
#define LPFC_DEVICE_DATA_POOL_SIZE 64	/* max elements in device data pool */
#define LPFC_RRQ_POOL_SIZE	256	/* max elements in non-DMA pool */
#define LPFC_MBX_POOL_SIZE	256	/* max elements in MBX non-DMA pool */

int
lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba)
{
	size_t bytes;
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (max_xri <= 0)
		return -ENOMEM;
	bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
		sizeof(unsigned long);
	phba->cfg_rrq_xri_bitmap_sz = bytes;
	phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
							    bytes);
	if (!phba->active_rrq_pool)
		return -ENOMEM;
	return 0;
}
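/*
 * Note on the sizing math above (illustrative, not driver code): the
 * expression rounds max_xri up to a whole number of unsigned longs, which
 * is exactly what the kernel's BITS_TO_LONGS() helper computes. A minimal
 * equivalent sketch:
 *
 *	size_t bytes = BITS_TO_LONGS(max_xri) * sizeof(unsigned long);
 *
 * For example, max_xri = 100 on a 64-bit build gives BITS_TO_LONGS(100) = 2,
 * so bytes = 16, enough to back a 100-bit XRI bitmap.
 */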
/**
 * lpfc_mem_alloc - create and allocate all PCI and memory pools
 * @phba: HBA to allocate pools for
 * @align: alignment requirement for blocks; must be a power of two
 *
 * Description: Creates and allocates PCI pools lpfc_mbuf_pool and, depending
 * on SLI revision, lpfc_hbq_pool or lpfc_hrb_pool/lpfc_drb_pool. Creates and
 * allocates kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist.
 *
 * Notes: Not interrupt-safe. Must be called with no locks held. If any
 * allocation fails, frees all successfully allocated memory before returning.
 *
 * Returns:
 * 0 on success
 * -ENOMEM on failure (if any memory allocations fail)
 **/
int
lpfc_mem_alloc(struct lpfc_hba *phba, int align)
{
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	int i;

	phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool",
					       &phba->pcidev->dev,
					       LPFC_BPL_SIZE, align, 0);
	if (!phba->lpfc_mbuf_pool)
		goto fail;

	pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE,
				       sizeof(struct lpfc_dmabuf),
				       GFP_KERNEL);
	if (!pool->elements)
		goto fail_free_lpfc_mbuf_pool;

	pool->max_count = 0;
	pool->current_count = 0;
	for (i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
		pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool,
					GFP_KERNEL, &pool->elements[i].phys);
		if (!pool->elements[i].virt)
			goto fail_free_mbuf_pool;
		pool->max_count++;
		pool->current_count++;
	}

	phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MBX_POOL_SIZE,
							  sizeof(LPFC_MBOXQ_t));
	if (!phba->mbox_mem_pool)
		goto fail_free_mbuf_pool;

	phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
						sizeof(struct lpfc_nodelist));
	if (!phba->nlp_mem_pool)
		goto fail_free_mbox_pool;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		phba->rrq_pool =
			mempool_create_kmalloc_pool(LPFC_RRQ_POOL_SIZE,
						sizeof(struct lpfc_node_rrq));
		if (!phba->rrq_pool)
			goto fail_free_nlp_mem_pool;
		phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool",
					      &phba->pcidev->dev,
					      LPFC_HDR_BUF_SIZE, align, 0);
		if (!phba->lpfc_hrb_pool)
			goto fail_free_rrq_mem_pool;

		phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool",
					      &phba->pcidev->dev,
					      LPFC_DATA_BUF_SIZE, align, 0);
		if (!phba->lpfc_drb_pool)
			goto fail_free_hrb_pool;
		phba->lpfc_hbq_pool = NULL;
	} else {
		phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool",
				&phba->pcidev->dev, LPFC_BPL_SIZE, align, 0);
		if (!phba->lpfc_hbq_pool)
			goto fail_free_nlp_mem_pool;
		phba->lpfc_hrb_pool = NULL;
		phba->lpfc_drb_pool = NULL;
	}

	if (phba->cfg_EnableXLane) {
		phba->device_data_mem_pool = mempool_create_kmalloc_pool(
					LPFC_DEVICE_DATA_POOL_SIZE,
					sizeof(struct lpfc_device_data));
		if (!phba->device_data_mem_pool)
			goto fail_free_drb_pool;
	} else {
		phba->device_data_mem_pool = NULL;
	}

	return 0;
fail_free_drb_pool:
	dma_pool_destroy(phba->lpfc_drb_pool);
	phba->lpfc_drb_pool = NULL;
	/* lpfc_hbq_pool is only set on SLI-3; destroy is a no-op on SLI-4 */
	dma_pool_destroy(phba->lpfc_hbq_pool);
	phba->lpfc_hbq_pool = NULL;
fail_free_hrb_pool:
	dma_pool_destroy(phba->lpfc_hrb_pool);
	phba->lpfc_hrb_pool = NULL;
fail_free_rrq_mem_pool:
	mempool_destroy(phba->rrq_pool);
	phba->rrq_pool = NULL;
fail_free_nlp_mem_pool:
	mempool_destroy(phba->nlp_mem_pool);
	phba->nlp_mem_pool = NULL;
fail_free_mbox_pool:
	mempool_destroy(phba->mbox_mem_pool);
	phba->mbox_mem_pool = NULL;
fail_free_mbuf_pool:
	while (i--)
		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
			      pool->elements[i].phys);
	kfree(pool->elements);
fail_free_lpfc_mbuf_pool:
	dma_pool_destroy(phba->lpfc_mbuf_pool);
	phba->lpfc_mbuf_pool = NULL;
fail:
	return -ENOMEM;
}
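/*
 * Usage sketch (illustrative, not driver code): a mempool created above
 * keeps its min_nr pre-allocated elements in reserve, so an allocation can
 * fall back to the reserve under memory pressure. A hypothetical caller of
 * the mailbox pool would look like:
 *
 *	LPFC_MBOXQ_t *mbox;
 *
 *	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mbox)
 *		return -ENOMEM;
 *	...
 *	mempool_free(mbox, phba->mbox_mem_pool);
 */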
int
lpfc_nvmet_mem_alloc(struct lpfc_hba *phba)
{
	phba->lpfc_nvmet_drb_pool =
		dma_pool_create("lpfc_nvmet_drb_pool",
				&phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE,
				SGL_ALIGN_SZ, 0);
	if (!phba->lpfc_nvmet_drb_pool) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6024 Can't enable NVME Target - no memory\n");
		return -ENOMEM;
	}
	return 0;
}

/**
 * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
 * @phba: HBA to free memory for
 *
 * Description: Frees the memory allocated by the lpfc_mem_alloc and
 * lpfc_nvmet_mem_alloc routines. This routine is the counterpart of
 * lpfc_mem_alloc.
 *
 * Returns: None
 **/
void
lpfc_mem_free(struct lpfc_hba *phba)
{
	int i;
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	struct lpfc_device_data *device_data;

	/* Free HBQ pools */
	lpfc_sli_hbqbuf_free_all(phba);
	dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
	phba->lpfc_nvmet_drb_pool = NULL;

	dma_pool_destroy(phba->lpfc_drb_pool);
	phba->lpfc_drb_pool = NULL;

	dma_pool_destroy(phba->lpfc_hrb_pool);
	phba->lpfc_hrb_pool = NULL;

	dma_pool_destroy(phba->lpfc_hbq_pool);
	phba->lpfc_hbq_pool = NULL;

	mempool_destroy(phba->rrq_pool);
	phba->rrq_pool = NULL;

	/* Free NLP memory pool */
	mempool_destroy(phba->nlp_mem_pool);
	phba->nlp_mem_pool = NULL;
	if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) {
		mempool_destroy(phba->active_rrq_pool);
		phba->active_rrq_pool = NULL;
	}

	/* Free mbox memory pool */
	mempool_destroy(phba->mbox_mem_pool);
	phba->mbox_mem_pool = NULL;

	/* Free MBUF memory pool */
	for (i = 0; i < pool->current_count; i++)
		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
			      pool->elements[i].phys);
	kfree(pool->elements);

	dma_pool_destroy(phba->lpfc_mbuf_pool);
	phba->lpfc_mbuf_pool = NULL;

	/* Free Device Data memory pool */
	if (phba->device_data_mem_pool) {
		/* Ensure all objects have been returned to the pool */
		while (!list_empty(&phba->luns)) {
			device_data = list_first_entry(&phba->luns,
						       struct lpfc_device_data,
						       listentry);
			list_del(&device_data->listentry);
			mempool_free(device_data,
				     phba->device_data_mem_pool);
		}
		mempool_destroy(phba->device_data_mem_pool);
	}
	phba->device_data_mem_pool = NULL;
}
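/*
 * Note (illustrative): the destroy-then-NULL pattern in lpfc_mem_free()
 * relies on dma_pool_destroy() and mempool_destroy() being no-ops when
 * passed NULL, so pools that were never created for this SLI revision
 * (e.g. lpfc_hbq_pool on SLI-4) can be "destroyed" unconditionally:
 *
 *	dma_pool_destroy(phba->lpfc_hbq_pool);	(safe even if NULL)
 *	phba->lpfc_hbq_pool = NULL;		(guards against reuse)
 */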
/**
 * lpfc_mem_free_all - Frees all PCI and driver memory
 * @phba: HBA to free memory for
 *
 * Description: Frees all PCI and driver memory pools, including
 * lpfc_sg_dma_buf_pool, lpfc_mbuf_pool and lpfc_hrb_pool, along with the
 * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Any queued or
 * active mailbox commands are returned to the mailbox mempool first.
 *
 * Returns: None
 **/
void
lpfc_mem_free_all(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mbox, *next_mbox;
	struct lpfc_dmabuf *mp;

	/* Free memory used in mailbox queue back to mailbox memory pool */
	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		list_del(&mbox->list);
		mempool_free(mbox, phba->mbox_mem_pool);
	}
	/* Free memory used in mailbox cmpl list back to mailbox memory pool */
	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		list_del(&mbox->list);
		mempool_free(mbox, phba->mbox_mem_pool);
	}
	/* Free the active mailbox command back to the mailbox memory pool */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	if (psli->mbox_active) {
		mbox = psli->mbox_active;
		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		mempool_free(mbox, phba->mbox_mem_pool);
		psli->mbox_active = NULL;
	}

	/* Free and destroy all the allocated memory pools */
	lpfc_mem_free(phba);

	/* Free DMA buffer memory pool */
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;

	dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
	phba->lpfc_cmd_rsp_buf_pool = NULL;

	/* Free Congestion Data buffer */
	if (phba->cgn_i) {
		dma_free_coherent(&phba->pcidev->dev,
				  sizeof(struct lpfc_cgn_info),
				  phba->cgn_i->virt, phba->cgn_i->phys);
		kfree(phba->cgn_i);
		phba->cgn_i = NULL;
	}

	/* Free RX Monitor */
	if (phba->rx_monitor) {
		lpfc_rx_monitor_destroy_ring(phba->rx_monitor);
		kfree(phba->rx_monitor);
		phba->rx_monitor = NULL;
	}

	/* Free the iocb lookup array */
	kfree(psli->iocbq_lookup);
	psli->iocbq_lookup = NULL;
}
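/*
 * Note (illustrative): the mailbox drain loops above use
 * list_for_each_entry_safe() because each iteration removes the current
 * entry; the non-_safe iterator would read the next pointer from freed
 * memory. Pattern sketch with hypothetical names:
 *
 *	list_for_each_entry_safe(pos, tmp, &head, list) {
 *		list_del(&pos->list);
 *		mempool_free(pos, pool);
 *	}
 */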
/**
 * lpfc_mbuf_alloc - Allocate an mbuf from the lpfc_mbuf_pool PCI pool
 * @phba: HBA which owns the pool to allocate from
 * @mem_flags: indicates if this is a priority (MEM_PRI) allocation
 * @handle: used to return the DMA-mapped address of the mbuf
 *
 * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI
 * pool. Tries the generic dma_pool_alloc() function first; if that fails and
 * mem_flags has MEM_PRI set (the only defined flag), falls back to an mbuf
 * from the HBA's safety pool.
 *
 * Notes: Not interrupt-safe. Must be called with no locks held. Takes
 * phba->hbalock.
 *
 * Returns:
 * pointer to the allocated mbuf on success
 * NULL on failure
 **/
void *
lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	unsigned long iflags;
	void *ret;

	ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
		pool->current_count--;
		ret = pool->elements[pool->current_count].virt;
		*handle = pool->elements[pool->current_count].phys;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}

/**
 * __lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (locked)
 * @phba: HBA which owns the pool to return to
 * @virt: mbuf to free
 * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
 *
 * Description: Returns an mbuf to the lpfc_mbuf_safety_pool if that pool is
 * below its max_count; otherwise frees the mbuf back to the lpfc_mbuf_pool.
 *
 * Notes: Must be called with phba->hbalock held to synchronize access to
 * lpfc_mbuf_safety_pool.
 *
 * Returns: None
 **/
void
__lpfc_mbuf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;

	if (pool->current_count < pool->max_count) {
		pool->elements[pool->current_count].virt = virt;
		pool->elements[pool->current_count].phys = dma;
		pool->current_count++;
	} else {
		dma_pool_free(phba->lpfc_mbuf_pool, virt, dma);
	}
}

/**
 * lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked)
 * @phba: HBA which owns the pool to return to
 * @virt: mbuf to free
 * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
 *
 * Description: Returns an mbuf to the lpfc_mbuf_safety_pool if that pool is
 * below its max_count; otherwise frees the mbuf back to the lpfc_mbuf_pool.
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_mbuf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbuf_free(phba, virt, dma);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
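/*
 * Usage sketch for the mbuf routines above (illustrative, hypothetical
 * caller): allocate a DMA-mapped mbuf with safety-pool fallback, then
 * return it through the unlocked free path, which refills the safety pool
 * before giving memory back to the DMA pool:
 *
 *	dma_addr_t phys;
 *	void *virt;
 *
 *	virt = lpfc_mbuf_alloc(phba, MEM_PRI, &phys);
 *	if (!virt)
 *		return -ENOMEM;
 *	...
 *	lpfc_mbuf_free(phba, virt, phys);
 */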
/**
 * lpfc_nvmet_buf_alloc - Allocate an nvmet_buf from the
 * lpfc_sg_dma_buf_pool PCI pool
 * @phba: HBA which owns the pool to allocate from
 * @mem_flags: unused; kept for symmetry with lpfc_mbuf_alloc
 * @handle: used to return the DMA-mapped address of the nvmet_buf
 *
 * Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool
 * PCI pool via the generic dma_pool_alloc() function.
 *
 * Returns:
 * pointer to the allocated nvmet_buf on success
 * NULL on failure
 **/
void *
lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
	void *ret;

	ret = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
	return ret;
}

/**
 * lpfc_nvmet_buf_free - Free an nvmet_buf from the lpfc_sg_dma_buf_pool
 * PCI pool
 * @phba: HBA which owns the pool to return to
 * @virt: nvmet_buf to free
 * @dma: the DMA-mapped address of the lpfc_sg_dma_buf_pool to be freed
 *
 * Returns: None
 **/
void
lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
}

/**
 * lpfc_els_hbq_alloc - Allocate an HBQ buffer
 * @phba: HBA to allocate HBQ buffer for
 *
 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI
 * pool along with a non-DMA-mapped container for it.
 *
 * Notes: Not interrupt-safe. Must be called with no locks held.
 *
 * Returns:
 * pointer to HBQ on success
 * NULL on failure
 **/
struct hbq_dmabuf *
lpfc_els_hbq_alloc(struct lpfc_hba *phba)
{
	struct hbq_dmabuf *hbqbp;

	hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
	if (!hbqbp)
		return NULL;

	hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
					  &hbqbp->dbuf.phys);
	if (!hbqbp->dbuf.virt) {
		kfree(hbqbp);
		return NULL;
	}
	hbqbp->total_size = LPFC_BPL_SIZE;
	return hbqbp;
}

/**
 * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
 * @phba: HBA buffer was allocated for
 * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
 *
 * Description: Frees both the container and the DMA-mapped buffer returned by
 * lpfc_els_hbq_alloc.
 *
 * Notes: Can be called with or without locks held.
 *
 * Returns: None
 **/
void
lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
{
	dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
	kfree(hbqbp);
}
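/*
 * Note (illustrative): lpfc_els_hbq_alloc()/lpfc_els_hbq_free() follow the
 * common container-plus-DMA-buffer pattern; on a partial allocation failure
 * the kzalloc'ed container is released so nothing leaks, and callers treat
 * the pair as one object:
 *
 *	struct hbq_dmabuf *hbqbp = lpfc_els_hbq_alloc(phba);
 *
 *	if (!hbqbp)
 *		return -ENOMEM;
 *	...
 *	lpfc_els_hbq_free(phba, hbqbp);
 */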
/**
 * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
 * @phba: HBA to allocate a receive buffer for
 *
 * Description: Allocates a DMA-mapped receive buffer pair (header from the
 * lpfc_hrb_pool PCI pool, data from the lpfc_drb_pool PCI pool) along with
 * a non-DMA-mapped container for them.
 *
 * Notes: Not interrupt-safe. Must be called with no locks held.
 *
 * Returns:
 * pointer to HBQ on success
 * NULL on failure
 **/
struct hbq_dmabuf *
lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
{
	struct hbq_dmabuf *dma_buf;

	dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
	if (!dma_buf)
		return NULL;

	dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
					    &dma_buf->hbuf.phys);
	if (!dma_buf->hbuf.virt) {
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
					    &dma_buf->dbuf.phys);
	if (!dma_buf->dbuf.virt) {
		dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
			      dma_buf->hbuf.phys);
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->total_size = LPFC_DATA_BUF_SIZE;
	return dma_buf;
}

/**
 * lpfc_sli4_rb_free - Frees a receive buffer
 * @phba: HBA buffer was allocated for
 * @dmab: DMA Buffer container returned by lpfc_sli4_rb_alloc
 *
 * Description: Frees both the container and the DMA-mapped buffers returned
 * by lpfc_sli4_rb_alloc.
 *
 * Notes: Can be called with or without locks held.
 *
 * Returns: None
 **/
void
lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
{
	dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
	dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
	kfree(dmab);
}

/**
 * lpfc_sli4_nvmet_alloc - Allocate an SLI4 Receive buffer
 * @phba: HBA to allocate a receive buffer for
 *
 * Description: Allocates a DMA-mapped receive buffer pair (header from the
 * lpfc_hrb_pool PCI pool, data from the lpfc_nvmet_drb_pool PCI pool) along
 * with a non-DMA-mapped container for them.
 *
 * Returns:
 * pointer to HBQ on success
 * NULL on failure
 **/
struct rqb_dmabuf *
lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
{
	struct rqb_dmabuf *dma_buf;

	dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
	if (!dma_buf)
		return NULL;

	dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
					    &dma_buf->hbuf.phys);
	if (!dma_buf->hbuf.virt) {
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool,
					    GFP_KERNEL, &dma_buf->dbuf.phys);
	if (!dma_buf->dbuf.virt) {
		dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
			      dma_buf->hbuf.phys);
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE;
	return dma_buf;
}

/**
 * lpfc_sli4_nvmet_free - Frees a receive buffer
 * @phba: HBA buffer was allocated for
 * @dmab: DMA Buffer container returned by lpfc_sli4_nvmet_alloc
 *
 * Description: Frees both the container and the DMA-mapped buffers returned
 * by lpfc_sli4_nvmet_alloc.
 *
 * Notes: Can be called with or without locks held.
 *
 * Returns: None
 **/
void
lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
{
	dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
	dma_pool_free(phba->lpfc_nvmet_drb_pool,
		      dmab->dbuf.virt, dmab->dbuf.phys);
	kfree(dmab);
}
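/*
 * Note (illustrative): the SLI-4 receive buffers above are two-part
 * objects; the hbuf (header) and dbuf (data) come from different DMA
 * pools, so each part must go back to the pool it was allocated from.
 * Matched pairing for the NVMET case (hypothetical caller):
 *
 *	struct rqb_dmabuf *buf = lpfc_sli4_nvmet_alloc(phba);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	lpfc_sli4_nvmet_free(phba, buf);
 */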
/**
 * lpfc_in_buf_free - Free a DMA buffer
 * @phba: HBA buffer is associated with
 * @mp: Buffer to free
 *
 * Description: Frees the given DMA buffer in the appropriate way, depending
 * on whether the HBA is running in SLI-3 mode with HBQs enabled.
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
	struct hbq_dmabuf *hbq_entry;
	unsigned long flags;

	if (!mp)
		return;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
		/* Check whether HBQ is still in use */
		spin_lock_irqsave(&phba->hbalock, flags);
		if (!phba->hbq_in_use) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return;
		}
		list_del(&hbq_entry->dbuf.list);
		if (hbq_entry->tag == -1) {
			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
				(phba, hbq_entry);
		} else {
			lpfc_sli_free_hbq(phba, hbq_entry);
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
	} else {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
}

/**
 * lpfc_rq_buf_free - Free a RQ DMA buffer
 * @phba: HBA buffer is associated with
 * @mp: Buffer to free
 *
 * Description: Frees the given RQ DMA buffer by reposting it to its
 * associated RQ so it can be reused.
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
	struct lpfc_rqb *rqbp;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct rqb_dmabuf *rqb_entry;
	unsigned long flags;
	int rc;

	if (!mp)
		return;

	rqb_entry = container_of(mp, struct rqb_dmabuf, hbuf);
	rqbp = rqb_entry->hrq->rqbp;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_del(&rqb_entry->hbuf.list);
	hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys);
	drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys);
	drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
	rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
	if (rc < 0) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6409 Cannot post to HRQ %d: %x %x %x "
				"DRQ %x %x\n",
				rqb_entry->hrq->queue_id,
				rqb_entry->hrq->host_index,
				rqb_entry->hrq->hba_index,
				rqb_entry->hrq->entry_count,
				rqb_entry->drq->host_index,
				rqb_entry->drq->hba_index);
		(rqbp->rqb_free_buffer)(phba, rqb_entry);
	} else {
		list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
		rqbp->buffer_count++;
	}

	spin_unlock_irqrestore(&phba->hbalock, flags);
}
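/*
 * Note on the RQE setup in lpfc_rq_buf_free() (illustrative): putPaddrLow()
 * and putPaddrHigh() split a 64-bit DMA address into the two 32-bit words a
 * receive queue entry carries. An equivalent form using generic kernel
 * helpers would be:
 *
 *	hrqe.address_lo = lower_32_bits(rqb_entry->hbuf.phys);
 *	hrqe.address_hi = upper_32_bits(rqb_entry->hbuf.phys);
 */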