/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2014 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"

#define LPFC_MBUF_POOL_SIZE        64  /* max elements in MBUF safety pool */
#define LPFC_MEM_POOL_SIZE         64  /* max elem in non-DMA safety pool */
#define LPFC_DEVICE_DATA_POOL_SIZE 64  /* max elements in device data pool */

/**
 * lpfc_mem_alloc_active_rrq_pool_s4 - create the SLI4 active RRQ mempool
 * @phba: HBA to allocate the pool for
 *
 * Returns 0 on success, -ENOMEM on failure.
 **/
int
lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba)
{
        size_t bytes;
        int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

        if (max_xri <= 0)
                return -ENOMEM;
        /* Round max_xri bits up to a whole number of unsigned longs */
        bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
                sizeof(unsigned long);
        phba->cfg_rrq_xri_bitmap_sz = bytes;
        phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
                                                            bytes);
        if (!phba->active_rrq_pool)
                return -ENOMEM;
        return 0;
}
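/*
 * Worked example (editorial illustration, not driver code): sizing the
 * active RRQ XRI bitmap above for max_xri = 100 on a 64-bit kernel:
 *
 *   bytes = ((64 - 1 + 100) / 64) * sizeof(unsigned long)
 *         = (163 / 64) * 8 = 2 * 8 = 16 bytes (128 bits >= 100 XRIs)
 *
 * This is the same round-up the kernel's BITS_TO_LONGS() macro performs,
 * i.e. bytes = BITS_TO_LONGS(max_xri) * sizeof(unsigned long).
 */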
/**
 * lpfc_mem_alloc - create and allocate all PCI and memory pools
 * @phba: HBA to allocate pools for
 * @align: alignment, in bytes, for the DMA-mapped buffer pools
 *
 * Description: Creates and allocates the PCI pool lpfc_mbuf_pool and,
 * depending on the SLI revision, either lpfc_hbq_pool (SLI3) or the
 * lpfc_hrb_pool/lpfc_drb_pool pair (SLI4). Creates and allocates
 * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist.
 *
 * Notes: Not interrupt-safe. Must be called with no locks held. If any
 * allocation fails, frees all successfully allocated memory before returning.
 *
 * Returns:
 *   0 on success
 *   -ENOMEM on failure (if any memory allocations fail)
 **/
int
lpfc_mem_alloc(struct lpfc_hba *phba, int align)
{
        struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
        int i;

        phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool",
                                               &phba->pcidev->dev,
                                               LPFC_BPL_SIZE,
                                               align, 0);
        if (!phba->lpfc_mbuf_pool)
                goto fail;

        pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE,
                                       sizeof(struct lpfc_dmabuf),
                                       GFP_KERNEL);
        if (!pool->elements)
                goto fail_free_lpfc_mbuf_pool;

        pool->max_count = 0;
        pool->current_count = 0;
        for (i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
                pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool,
                                        GFP_KERNEL, &pool->elements[i].phys);
                if (!pool->elements[i].virt)
                        goto fail_free_mbuf_pool;
                pool->max_count++;
                pool->current_count++;
        }

        phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
                                                          sizeof(LPFC_MBOXQ_t));
        if (!phba->mbox_mem_pool)
                goto fail_free_mbuf_pool;

        phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
                                                sizeof(struct lpfc_nodelist));
        if (!phba->nlp_mem_pool)
                goto fail_free_mbox_pool;

        if (phba->sli_rev == LPFC_SLI_REV4) {
                phba->rrq_pool =
                        mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
                                                sizeof(struct lpfc_node_rrq));
                if (!phba->rrq_pool)
                        goto fail_free_nlp_mem_pool;
                phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool",
                                              &phba->pcidev->dev,
                                              LPFC_HDR_BUF_SIZE, align, 0);
                if (!phba->lpfc_hrb_pool)
                        goto fail_free_rrq_mem_pool;

                phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool",
                                              &phba->pcidev->dev,
                                              LPFC_DATA_BUF_SIZE, align, 0);
                if (!phba->lpfc_drb_pool)
                        goto fail_free_hrb_pool;
                phba->lpfc_hbq_pool = NULL;
        } else {
                phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool",
                        &phba->pcidev->dev, LPFC_BPL_SIZE, align, 0);
                if (!phba->lpfc_hbq_pool)
                        goto fail_free_nlp_mem_pool;
                phba->lpfc_hrb_pool = NULL;
                phba->lpfc_drb_pool = NULL;
        }

        if (phba->cfg_EnableXLane) {
                phba->device_data_mem_pool = mempool_create_kmalloc_pool(
                                        LPFC_DEVICE_DATA_POOL_SIZE,
                                        sizeof(struct lpfc_device_data));
                if (!phba->device_data_mem_pool)
                        goto fail_free_drb_pool;
        } else {
                phba->device_data_mem_pool = NULL;
        }

        return 0;
fail_free_drb_pool:
        dma_pool_destroy(phba->lpfc_drb_pool);
        phba->lpfc_drb_pool = NULL;
        /* The SLI3 path reaches this label with the HBQ pool allocated;
         * destroy it here too (a no-op on SLI4, where it is NULL) so it
         * is not leaked on the XLane failure path.
         */
        dma_pool_destroy(phba->lpfc_hbq_pool);
        phba->lpfc_hbq_pool = NULL;
fail_free_hrb_pool:
        dma_pool_destroy(phba->lpfc_hrb_pool);
        phba->lpfc_hrb_pool = NULL;
fail_free_rrq_mem_pool:
        mempool_destroy(phba->rrq_pool);
        phba->rrq_pool = NULL;
fail_free_nlp_mem_pool:
        mempool_destroy(phba->nlp_mem_pool);
        phba->nlp_mem_pool = NULL;
fail_free_mbox_pool:
        mempool_destroy(phba->mbox_mem_pool);
        phba->mbox_mem_pool = NULL;
fail_free_mbuf_pool:
        while (i--)
                dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
                              pool->elements[i].phys);
        kfree(pool->elements);
fail_free_lpfc_mbuf_pool:
        dma_pool_destroy(phba->lpfc_mbuf_pool);
        phba->lpfc_mbuf_pool = NULL;
fail:
        return -ENOMEM;
}

/**
 * lpfc_nvmet_mem_alloc - create the NVMET data buffer DMA pool
 * @phba: HBA to allocate the pool for
 *
 * Returns 0 on success, -ENOMEM on failure.
 **/
int
lpfc_nvmet_mem_alloc(struct lpfc_hba *phba)
{
        phba->lpfc_nvmet_drb_pool =
                dma_pool_create("lpfc_nvmet_drb_pool",
                                &phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE,
                                SGL_ALIGN_SZ, 0);
        if (!phba->lpfc_nvmet_drb_pool) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "6024 Can't enable NVME Target - no memory\n");
                return -ENOMEM;
        }
        return 0;
}
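/*
 * Illustrative call pattern (a sketch under assumptions, not driver code;
 * example_setup() is hypothetical): lpfc_mem_alloc() is meant for probe-time
 * setup and pairs with lpfc_mem_free_all() on teardown. SGL_ALIGN_SZ is the
 * SLI4 alignment also used by lpfc_nvmet_mem_alloc() above.
 *
 *   static int example_setup(struct lpfc_hba *phba)
 *   {
 *           int rc;
 *
 *           rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
 *           if (rc)
 *                   return rc;
 *           if (phba->nvmet_support && lpfc_nvmet_mem_alloc(phba)) {
 *                   lpfc_mem_free_all(phba);
 *                   return -ENOMEM;
 *           }
 *           return 0;
 *   }
 */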
/**
 * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
 * @phba: HBA to free memory for
 *
 * Description: Free the memory allocated by the lpfc_mem_alloc routine. This
 * routine is the counterpart of lpfc_mem_alloc.
 *
 * Returns: None
 **/
void
lpfc_mem_free(struct lpfc_hba *phba)
{
        int i;
        struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
        struct lpfc_device_data *device_data;

        /* Free HBQ pools */
        lpfc_sli_hbqbuf_free_all(phba);
        dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
        phba->lpfc_nvmet_drb_pool = NULL;

        dma_pool_destroy(phba->lpfc_drb_pool);
        phba->lpfc_drb_pool = NULL;

        dma_pool_destroy(phba->lpfc_hrb_pool);
        phba->lpfc_hrb_pool = NULL;

        dma_pool_destroy(phba->lpfc_hbq_pool);
        phba->lpfc_hbq_pool = NULL;

        mempool_destroy(phba->rrq_pool);
        phba->rrq_pool = NULL;

        /* Free NLP memory pool */
        mempool_destroy(phba->nlp_mem_pool);
        phba->nlp_mem_pool = NULL;
        if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) {
                mempool_destroy(phba->active_rrq_pool);
                phba->active_rrq_pool = NULL;
        }

        /* Free mbox memory pool */
        mempool_destroy(phba->mbox_mem_pool);
        phba->mbox_mem_pool = NULL;

        /* Free MBUF memory pool */
        for (i = 0; i < pool->current_count; i++)
                dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
                              pool->elements[i].phys);
        kfree(pool->elements);

        dma_pool_destroy(phba->lpfc_mbuf_pool);
        phba->lpfc_mbuf_pool = NULL;

        /* Free Device Data memory pool */
        if (phba->device_data_mem_pool) {
                /* Ensure all objects have been returned to the pool */
                while (!list_empty(&phba->luns)) {
                        device_data = list_first_entry(&phba->luns,
                                                       struct lpfc_device_data,
                                                       listentry);
                        list_del(&device_data->listentry);
                        mempool_free(device_data,
                                     phba->device_data_mem_pool);
                }
                mempool_destroy(phba->device_data_mem_pool);
        }
        phba->device_data_mem_pool = NULL;
}
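/*
 * Note on the teardown idiom above (editorial): mempool_destroy() and
 * dma_pool_destroy() are no-ops when passed NULL, so lpfc_mem_free() can
 * destroy most pools unconditionally and only guards the pools whose
 * existence is configuration-dependent (active_rrq_pool,
 * device_data_mem_pool). Resetting each pointer keeps repeated teardown
 * harmless:
 *
 *   dma_pool_destroy(phba->lpfc_drb_pool);   // NULL-safe
 *   phba->lpfc_drb_pool = NULL;              // idempotent on a second call
 */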
/**
 * lpfc_mem_free_all - Frees all PCI and driver memory
 * @phba: HBA to free memory for
 *
 * Description: Free memory from the PCI and driver memory pools, including
 * lpfc_sg_dma_buf_pool, lpfc_mbuf_pool, and lpfc_hrb_pool. Frees the
 * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist, returning any
 * queued or active mailbox commands to the mailbox mempool first.
 *
 * Returns: None
 **/
void
lpfc_mem_free_all(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        LPFC_MBOXQ_t *mbox, *next_mbox;
        struct lpfc_dmabuf *mp;

        /* Free memory used in mailbox queue back to mailbox memory pool */
        list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
                mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
                if (mp) {
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                }
                list_del(&mbox->list);
                mempool_free(mbox, phba->mbox_mem_pool);
        }
        /* Free memory used in mailbox cmpl list back to mailbox memory pool */
        list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
                mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
                if (mp) {
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                }
                list_del(&mbox->list);
                mempool_free(mbox, phba->mbox_mem_pool);
        }
        /* Free the active mailbox command back to the mailbox memory pool */
        spin_lock_irq(&phba->hbalock);
        psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
        spin_unlock_irq(&phba->hbalock);
        if (psli->mbox_active) {
                mbox = psli->mbox_active;
                mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
                if (mp) {
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                }
                mempool_free(mbox, phba->mbox_mem_pool);
                psli->mbox_active = NULL;
        }

        /* Free and destroy all the allocated memory pools */
        lpfc_mem_free(phba);

        /* Free DMA buffer memory pool */
        dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
        phba->lpfc_sg_dma_buf_pool = NULL;

        dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
        phba->lpfc_cmd_rsp_buf_pool = NULL;

        /* Free the iocb lookup array */
        kfree(psli->iocbq_lookup);
        psli->iocbq_lookup = NULL;
}

/**
 * lpfc_mbuf_alloc - Allocate an mbuf from the lpfc_mbuf_pool PCI pool
 * @phba: HBA which owns the pool to allocate from
 * @mem_flags: indicates if this is a priority (MEM_PRI) allocation
 * @handle: used to return the DMA-mapped address of the mbuf
 *
 * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI
 * pool. Allocates via the generic dma_pool_alloc function first; if that
 * fails and mem_flags has MEM_PRI set (the only defined flag), returns an
 * mbuf from the HBA's safety pool.
 *
 * Notes: Not interrupt-safe. Must be called with no locks held. Takes
 * phba->hbalock.
 *
 * Returns:
 *   pointer to the allocated mbuf on success
 *   NULL on failure
 **/
void *
lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
        struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
        unsigned long iflags;
        void *ret;

        ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);

        spin_lock_irqsave(&phba->hbalock, iflags);
        if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
                pool->current_count--;
                ret = pool->elements[pool->current_count].virt;
                *handle = pool->elements[pool->current_count].phys;
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return ret;
}
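/*
 * Usage sketch (hypothetical caller, for illustration only): an mbuf that
 * backs a mailbox command can be allocated with MEM_PRI so that, when
 * dma_pool_alloc() fails under memory pressure, the request is satisfied
 * from the safety pool instead:
 *
 *   struct lpfc_dmabuf *mp = kmalloc(sizeof(*mp), GFP_KERNEL);
 *
 *   if (mp) {
 *           mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp->phys);
 *           if (!mp->virt) {
 *                   kfree(mp);
 *                   mp = NULL;
 *           }
 *   }
 *   ...
 *   lpfc_mbuf_free(phba, mp->virt, mp->phys);  // may refill the safety pool
 *   kfree(mp);
 */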
/**
 * __lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (locked)
 * @phba: HBA which owns the pool to return to
 * @virt: mbuf to free
 * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
 *
 * Description: Returns an mbuf to the lpfc_mbuf_safety_pool if the safety
 * pool is below its max_count; otherwise frees the mbuf back to the
 * lpfc_mbuf_pool PCI pool.
 *
 * Notes: Must be called with phba->hbalock held to synchronize access to
 * lpfc_mbuf_safety_pool.
 *
 * Returns: None
 **/
void
__lpfc_mbuf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
        struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;

        if (pool->current_count < pool->max_count) {
                pool->elements[pool->current_count].virt = virt;
                pool->elements[pool->current_count].phys = dma;
                pool->current_count++;
        } else {
                dma_pool_free(phba->lpfc_mbuf_pool, virt, dma);
        }
}

/**
 * lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked)
 * @phba: HBA which owns the pool to return to
 * @virt: mbuf to free
 * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
 *
 * Description: Returns an mbuf to the lpfc_mbuf_safety_pool if the safety
 * pool is below its max_count; otherwise frees the mbuf back to the
 * lpfc_mbuf_pool PCI pool.
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks
 * held.
 *
 * Returns: None
 **/
void
lpfc_mbuf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        __lpfc_mbuf_free(phba, virt, dma);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
}
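/*
 * Choosing between the two variants (editorial sketch): call
 * __lpfc_mbuf_free() only on paths that already hold phba->hbalock;
 * everywhere else call lpfc_mbuf_free(), which takes the lock itself:
 *
 *   spin_lock_irqsave(&phba->hbalock, iflags);
 *   ...
 *   __lpfc_mbuf_free(phba, virt, dma);    // lock already held, no double-lock
 *   spin_unlock_irqrestore(&phba->hbalock, iflags);
 *
 *   lpfc_mbuf_free(phba, virt, dma);      // unlocked context
 */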
/**
 * lpfc_nvmet_buf_alloc - Allocate an nvmet_buf from the
 * lpfc_sg_dma_buf_pool PCI pool
 * @phba: HBA which owns the pool to allocate from
 * @mem_flags: unused; kept for symmetry with lpfc_mbuf_alloc
 * @handle: used to return the DMA-mapped address of the nvmet_buf
 *
 * Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool
 * PCI pool via the generic dma_pool_alloc function. There is no safety-pool
 * fallback.
 *
 * Returns:
 *   pointer to the allocated nvmet_buf on success
 *   NULL on failure
 **/
void *
lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
        return dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
}

/**
 * lpfc_nvmet_buf_free - Free an nvmet_buf from the lpfc_sg_dma_buf_pool
 * PCI pool
 * @phba: HBA which owns the pool to return to
 * @virt: nvmet_buf to free
 * @dma: the DMA-mapped address of the lpfc_sg_dma_buf_pool to be freed
 *
 * Returns: None
 **/
void
lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
        dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
}

/**
 * lpfc_els_hbq_alloc - Allocate an HBQ buffer
 * @phba: HBA to allocate HBQ buffer for
 *
 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI
 * pool along with a non-DMA-mapped container for it.
 *
 * Notes: Not interrupt-safe. Must be called with no locks held.
 *
 * Returns:
 *   pointer to HBQ on success
 *   NULL on failure
 **/
struct hbq_dmabuf *
lpfc_els_hbq_alloc(struct lpfc_hba *phba)
{
        struct hbq_dmabuf *hbqbp;

        hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
        if (!hbqbp)
                return NULL;

        hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
                                          &hbqbp->dbuf.phys);
        if (!hbqbp->dbuf.virt) {
                kfree(hbqbp);
                return NULL;
        }
        hbqbp->total_size = LPFC_BPL_SIZE;
        return hbqbp;
}

/**
 * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
 * @phba: HBA buffer was allocated for
 * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
 *
 * Description: Frees both the container and the DMA-mapped buffer returned
 * by lpfc_els_hbq_alloc.
 *
 * Notes: Can be called with or without locks held.
 *
 * Returns: None
 **/
void
lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
{
        dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
        kfree(hbqbp);
}

/**
 * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
 * @phba: HBA to allocate a receive buffer for
 *
 * Description: Allocates a DMA-mapped receive buffer pair - a header buffer
 * from the lpfc_hrb_pool PCI pool and a data buffer from the lpfc_drb_pool
 * PCI pool - along with a non-DMA-mapped container for them.
 *
 * Notes: Not interrupt-safe. Must be called with no locks held.
 *
 * Returns:
 *   pointer to the receive buffer on success
 *   NULL on failure
 **/
struct hbq_dmabuf *
lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
{
        struct hbq_dmabuf *dma_buf;

        dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
        if (!dma_buf)
                return NULL;

        dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
                                            &dma_buf->hbuf.phys);
        if (!dma_buf->hbuf.virt) {
                kfree(dma_buf);
                return NULL;
        }
        dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
                                            &dma_buf->dbuf.phys);
        if (!dma_buf->dbuf.virt) {
                /* Unwind the header buffer before failing */
                dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
                              dma_buf->hbuf.phys);
                kfree(dma_buf);
                return NULL;
        }
        dma_buf->total_size = LPFC_DATA_BUF_SIZE;
        return dma_buf;
}

/**
 * lpfc_sli4_rb_free - Frees a receive buffer
 * @phba: HBA buffer was allocated for
 * @dmab: DMA Buffer container returned by lpfc_sli4_rb_alloc
 *
 * Description: Frees both the container and the DMA-mapped buffers returned
 * by lpfc_sli4_rb_alloc.
 *
 * Notes: Can be called with or without locks held.
 *
 * Returns: None
 **/
void
lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
{
        dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
        dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
        kfree(dmab);
}
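/*
 * Design note (editorial): an SLI4 receive buffer is a header/data pair
 * drawn from two DMA pools but carried in a single container, so code that
 * holds only the embedded lpfc_dmabuf can recover the whole container with
 * container_of(), as lpfc_in_buf_free() and lpfc_rq_buf_free() below do:
 *
 *   struct hbq_dmabuf *hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
 *   struct rqb_dmabuf *rqb_entry = container_of(mp, struct rqb_dmabuf, hbuf);
 */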
/**
 * lpfc_sli4_nvmet_alloc - Allocate an SLI4 NVMET receive buffer
 * @phba: HBA to allocate a receive buffer for
 *
 * Description: Allocates a DMA-mapped receive buffer pair - a header buffer
 * from the lpfc_hrb_pool PCI pool and a data buffer from the
 * lpfc_nvmet_drb_pool PCI pool - along with a non-DMA-mapped container for
 * them.
 *
 * Notes: Not interrupt-safe. Must be called with no locks held.
 *
 * Returns:
 *   pointer to the receive buffer on success
 *   NULL on failure
 **/
struct rqb_dmabuf *
lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
{
        struct rqb_dmabuf *dma_buf;

        dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
        if (!dma_buf)
                return NULL;

        dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
                                            &dma_buf->hbuf.phys);
        if (!dma_buf->hbuf.virt) {
                kfree(dma_buf);
                return NULL;
        }
        dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool,
                                            GFP_KERNEL, &dma_buf->dbuf.phys);
        if (!dma_buf->dbuf.virt) {
                /* Unwind the header buffer before failing */
                dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
                              dma_buf->hbuf.phys);
                kfree(dma_buf);
                return NULL;
        }
        dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE;
        return dma_buf;
}

/**
 * lpfc_sli4_nvmet_free - Frees an NVMET receive buffer
 * @phba: HBA buffer was allocated for
 * @dmab: DMA Buffer container returned by lpfc_sli4_nvmet_alloc
 *
 * Description: Frees both the container and the DMA-mapped buffers returned
 * by lpfc_sli4_nvmet_alloc.
 *
 * Notes: Can be called with or without locks held.
 *
 * Returns: None
 **/
void
lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
{
        dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
        dma_pool_free(phba->lpfc_nvmet_drb_pool,
                      dmab->dbuf.virt, dmab->dbuf.phys);
        kfree(dmab);
}

/**
 * lpfc_in_buf_free - Free a DMA buffer
 * @phba: HBA buffer is associated with
 * @mp: Buffer to free
 *
 * Description: Frees the given DMA buffer in the appropriate way: if the HBA
 * is running in SLI3 mode with HBQs enabled, the buffer is returned as an
 * HBQ buffer; otherwise it is freed as an mbuf.
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks
 * held.
 *
 * Returns: None
 **/
void
lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
        struct hbq_dmabuf *hbq_entry;
        unsigned long flags;

        if (!mp)
                return;

        if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
                hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
                /* Check whether HBQ is still in use */
                spin_lock_irqsave(&phba->hbalock, flags);
                if (!phba->hbq_in_use) {
                        spin_unlock_irqrestore(&phba->hbalock, flags);
                        return;
                }
                list_del(&hbq_entry->dbuf.list);
                if (hbq_entry->tag == -1) {
                        /* Untagged buffer: return via the HBQ free routine */
                        (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
                                (phba, hbq_entry);
                } else {
                        lpfc_sli_free_hbq(phba, hbq_entry);
                }
                spin_unlock_irqrestore(&phba->hbalock, flags);
        } else {
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
        }
}
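/*
 * Worked example (editorial illustration): lpfc_rq_buf_free() below rebuilds
 * the receive queue entries from the saved 64-bit DMA addresses.
 * putPaddrLow()/putPaddrHigh() split a dma_addr_t into the two 32-bit words
 * an RQE carries:
 *
 *   dma_addr_t phys = 0x0000000123456000ULL;
 *   hrqe.address_lo = putPaddrLow(phys);   // 0x23456000
 *   hrqe.address_hi = putPaddrHigh(phys);  // 0x00000001
 */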
/**
 * lpfc_rq_buf_free - Free a RQ DMA buffer
 * @phba: HBA buffer is associated with
 * @mp: Buffer to free
 *
 * Description: Frees the given RQ DMA buffer by reposting it to its
 * associated RQ so it can be reused. If the repost fails, the buffer is
 * handed back to the RQ's free routine instead.
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks
 * held.
 *
 * Returns: None
 **/
void
lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
        struct lpfc_rqb *rqbp;
        struct lpfc_rqe hrqe;
        struct lpfc_rqe drqe;
        struct rqb_dmabuf *rqb_entry;
        unsigned long flags;
        int rc;

        if (!mp)
                return;

        rqb_entry = container_of(mp, struct rqb_dmabuf, hbuf);
        rqbp = rqb_entry->hrq->rqbp;

        spin_lock_irqsave(&phba->hbalock, flags);
        list_del(&rqb_entry->hbuf.list);
        hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys);
        hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys);
        drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys);
        drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
        rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
        if (rc < 0) {
                (rqbp->rqb_free_buffer)(phba, rqb_entry);
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "6409 Cannot post to HRQ %d: %x %x %x "
                                "DRQ %x %x\n",
                                rqb_entry->hrq->queue_id,
                                rqb_entry->hrq->host_index,
                                rqb_entry->hrq->hba_index,
                                rqb_entry->hrq->entry_count,
                                rqb_entry->drq->host_index,
                                rqb_entry->drq->hba_index);
        } else {
                list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
                rqbp->buffer_count++;
        }

        spin_unlock_irqrestore(&phba->hbalock, flags);
}