/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/page.h>

#include "mthca_memfree.h"
#include "mthca_dev.h"
#include "mthca_cmd.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
        MTHCA_ICM_ALLOC_SIZE   = 1 << 18,
        MTHCA_TABLE_CHUNK_SIZE = 1 << 18
};

struct mthca_user_db_table {
        struct mutex mutex;
        struct {
                u64                uvirt;
                struct scatterlist mem;
                int                refcount;
        }            page[0];
};

static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
        int i;

        if (chunk->nsg > 0)
                pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
                             PCI_DMA_BIDIRECTIONAL);

        for (i = 0; i < chunk->npages; ++i)
                __free_pages(sg_page(&chunk->mem[i]),
                             get_order(chunk->mem[i].length));
}

static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
        int i;

        for (i = 0; i < chunk->npages; ++i) {
                dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
                                  lowmem_page_address(sg_page(&chunk->mem[i])),
                                  sg_dma_address(&chunk->mem[i]));
        }
}

void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent)
{
        struct mthca_icm_chunk *chunk, *tmp;

        if (!icm)
                return;

        list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
                if (coherent)
                        mthca_free_icm_coherent(dev, chunk);
                else
                        mthca_free_icm_pages(dev, chunk);

                kfree(chunk);
        }

        kfree(icm);
}

static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
        struct page *page;

        /*
         * Use __GFP_ZERO because buggy firmware assumes ICM pages are
         * cleared, and subtle failures are seen if they aren't.
         */
        page = alloc_pages(gfp_mask | __GFP_ZERO, order);
        if (!page)
                return -ENOMEM;

        sg_set_page(mem, page, PAGE_SIZE << order, 0);
        return 0;
}

static int mthca_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
                                    int order, gfp_t gfp_mask)
{
        void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, &sg_dma_address(mem),
                                       gfp_mask);
        if (!buf)
                return -ENOMEM;

        sg_set_buf(mem, buf, PAGE_SIZE << order);
        BUG_ON(mem->offset);
        sg_dma_len(mem) = PAGE_SIZE << order;
        return 0;
}

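/*
 * A worked example of the fallback loop below, assuming 4 KB pages:
 * get_order(MTHCA_ICM_ALLOC_SIZE) == 6, so each iteration first tries
 * a 64-page (256 KB) allocation.  Each time alloc_pages() fails, the
 * order is decremented (halving the request) as far as order 0 before
 * the allocation as a whole is abandoned.
 */
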
struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
                                  gfp_t gfp_mask, int coherent)
{
        struct mthca_icm *icm;
        struct mthca_icm_chunk *chunk = NULL;
        int cur_order;
        int ret;

        /* We use sg_set_buf for coherent allocs, which assumes low memory */
        BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

        icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
        if (!icm)
                return icm;

        icm->refcount = 0;
        INIT_LIST_HEAD(&icm->chunk_list);

        cur_order = get_order(MTHCA_ICM_ALLOC_SIZE);

        while (npages > 0) {
                if (!chunk) {
                        chunk = kmalloc(sizeof *chunk,
                                        gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
                        if (!chunk)
                                goto fail;

                        sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN);
                        chunk->npages = 0;
                        chunk->nsg    = 0;
                        list_add_tail(&chunk->list, &icm->chunk_list);
                }

                while (1 << cur_order > npages)
                        --cur_order;

                if (coherent)
                        ret = mthca_alloc_icm_coherent(&dev->pdev->dev,
                                                       &chunk->mem[chunk->npages],
                                                       cur_order, gfp_mask);
                else
                        ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
                                                    cur_order, gfp_mask);

                if (!ret) {
                        ++chunk->npages;

                        if (coherent)
                                ++chunk->nsg;
                        else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) {
                                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                                        chunk->npages,
                                                        PCI_DMA_BIDIRECTIONAL);

                                if (chunk->nsg <= 0)
                                        goto fail;
                        }

                        if (chunk->npages == MTHCA_ICM_CHUNK_LEN)
                                chunk = NULL;

                        npages -= 1 << cur_order;
                } else {
                        --cur_order;
                        if (cur_order < 0)
                                goto fail;
                }
        }

        if (!coherent && chunk) {
                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                        chunk->npages,
                                        PCI_DMA_BIDIRECTIONAL);

                if (chunk->nsg <= 0)
                        goto fail;
        }

        return icm;

fail:
        mthca_free_icm(dev, icm, coherent);
        return NULL;
}

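/*
 * ICM tables are carved into MTHCA_TABLE_CHUNK_SIZE pieces that are
 * mapped and refcounted independently.  For example, with a 64-byte
 * object size each 256 KB chunk covers 4096 objects, so object 5000
 * lands in chunk 1 (5000 * 64 / MTHCA_TABLE_CHUNK_SIZE == 1).
 */
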
int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
        int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
        int ret = 0;
        u8 status;

        mutex_lock(&table->mutex);

        if (table->icm[i]) {
                ++table->icm[i]->refcount;
                goto out;
        }

        table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
                                        (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
                                        __GFP_NOWARN, table->coherent);
        if (!table->icm[i]) {
                ret = -ENOMEM;
                goto out;
        }

        if (mthca_MAP_ICM(dev, table->icm[i], table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
                          &status) || status) {
                mthca_free_icm(dev, table->icm[i], table->coherent);
                table->icm[i] = NULL;
                ret = -ENOMEM;
                goto out;
        }

        ++table->icm[i]->refcount;

out:
        mutex_unlock(&table->mutex);
        return ret;
}

void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
        int i;
        u8 status;

        if (!mthca_is_memfree(dev))
                return;

        i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;

        mutex_lock(&table->mutex);

        if (--table->icm[i]->refcount == 0) {
                mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
                                MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
                                &status);
                mthca_free_icm(dev, table->icm[i], table->coherent);
                table->icm[i] = NULL;
        }

        mutex_unlock(&table->mutex);
}

void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_handle)
{
        int idx, offset, dma_offset, i;
        struct mthca_icm_chunk *chunk;
        struct mthca_icm *icm;
        struct page *page = NULL;

        if (!table->lowmem)
                return NULL;

        mutex_lock(&table->mutex);

        idx = (obj & (table->num_obj - 1)) * table->obj_size;
        icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE];
        dma_offset = offset = idx % MTHCA_TABLE_CHUNK_SIZE;

        if (!icm)
                goto out;

        list_for_each_entry(chunk, &icm->chunk_list, list) {
                for (i = 0; i < chunk->npages; ++i) {
                        if (dma_handle && dma_offset >= 0) {
                                if (sg_dma_len(&chunk->mem[i]) > dma_offset)
                                        *dma_handle = sg_dma_address(&chunk->mem[i]) +
                                                dma_offset;
                                dma_offset -= sg_dma_len(&chunk->mem[i]);
                        }
                        /*
                         * DMA mapping can merge pages but not split them,
                         * so if we found the page, dma_handle has already
                         * been assigned to.
                         */
                        if (chunk->mem[i].length > offset) {
                                page = sg_page(&chunk->mem[i]);
                                goto out;
                        }
                        offset -= chunk->mem[i].length;
                }
        }

out:
        mutex_unlock(&table->mutex);
        return page ? lowmem_page_address(page) + offset : NULL;
}

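/*
 * get_range/put_range step through [start, end] one chunk's worth of
 * objects at a time (both bounds inclusive).  E.g. with a 32-byte
 * object size, inc == MTHCA_TABLE_CHUNK_SIZE / 32 == 8192 objects.
 */
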
int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table,
                          int start, int end)
{
        int inc = MTHCA_TABLE_CHUNK_SIZE / table->obj_size;
        int i, err;

        for (i = start; i <= end; i += inc) {
                err = mthca_table_get(dev, table, i);
                if (err)
                        goto fail;
        }

        return 0;

fail:
        while (i > start) {
                i -= inc;
                mthca_table_put(dev, table, i);
        }

        return err;
}

void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
                           int start, int end)
{
        int i;

        if (!mthca_is_memfree(dev))
                return;

        for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size)
                mthca_table_put(dev, table, i);
}

struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
                                              u64 virt, int obj_size,
                                              int nobj, int reserved,
                                              int use_lowmem, int use_coherent)
{
        struct mthca_icm_table *table;
        int obj_per_chunk;
        int num_icm;
        unsigned chunk_size;
        int i;
        u8 status;

        obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size;
        num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);

        table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
        if (!table)
                return NULL;

        table->virt     = virt;
        table->num_icm  = num_icm;
        table->num_obj  = nobj;
        table->obj_size = obj_size;
        table->lowmem   = use_lowmem;
        table->coherent = use_coherent;
        mutex_init(&table->mutex);

        for (i = 0; i < num_icm; ++i)
                table->icm[i] = NULL;

        for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
                chunk_size = MTHCA_TABLE_CHUNK_SIZE;
                if ((i + 1) * MTHCA_TABLE_CHUNK_SIZE > nobj * obj_size)
                        chunk_size = nobj * obj_size - i * MTHCA_TABLE_CHUNK_SIZE;

                table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
                                                (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
                                                __GFP_NOWARN, use_coherent);
                if (!table->icm[i])
                        goto err;
                if (mthca_MAP_ICM(dev, table->icm[i], virt + i * MTHCA_TABLE_CHUNK_SIZE,
                                  &status) || status) {
                        mthca_free_icm(dev, table->icm[i], table->coherent);
                        table->icm[i] = NULL;
                        goto err;
                }

                /*
                 * Add a reference to this ICM chunk so that it never
                 * gets freed (since it contains reserved firmware
                 * objects).
                 */
                ++table->icm[i]->refcount;
        }

        return table;

err:
        for (i = 0; i < num_icm; ++i)
                if (table->icm[i]) {
                        mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
                                        MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
                                        &status);
                        mthca_free_icm(dev, table->icm[i], table->coherent);
                }

        kfree(table);

        return NULL;
}

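/*
 * Sketch of a typical caller (the sizes here are illustrative, not
 * taken from any real hardware profile): a table of 1 << 20 objects
 * of 64 bytes each, with the first 16 reserved for firmware, backed
 * by lowmem, non-coherent ICM:
 *
 *	table = mthca_alloc_icm_table(dev, virt, 64, 1 << 20, 16, 1, 0);
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	mthca_free_icm_table(dev, table);
 */
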
void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
{
        int i;
        u8 status;

        for (i = 0; i < table->num_icm; ++i)
                if (table->icm[i]) {
                        mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
                                        MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
                                        &status);
                        mthca_free_icm(dev, table->icm[i], table->coherent);
                }

        kfree(table);
}

static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page)
{
        return dev->uar_table.uarc_base +
                uar->index * dev->uar_table.uarc_size +
                page * MTHCA_ICM_PAGE_SIZE;
}

int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
                      struct mthca_user_db_table *db_tab, int index, u64 uaddr)
{
        struct page *pages[1];
        int ret = 0;
        u8 status;
        int i;

        if (!mthca_is_memfree(dev))
                return 0;

        /* Each doorbell record is 8 bytes */
        if (index < 0 || index > dev->uar_table.uarc_size / 8)
                return -EINVAL;

        mutex_lock(&db_tab->mutex);

        i = index / MTHCA_DB_REC_PER_PAGE;

        if ((db_tab->page[i].refcount >= MTHCA_DB_REC_PER_PAGE) ||
            (db_tab->page[i].uvirt && db_tab->page[i].uvirt != uaddr) ||
            (uaddr & 4095)) {
                ret = -EINVAL;
                goto out;
        }

        if (db_tab->page[i].refcount) {
                ++db_tab->page[i].refcount;
                goto out;
        }

        ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
                             pages, NULL);
        if (ret < 0)
                goto out;

        sg_set_page(&db_tab->page[i].mem, pages[0], MTHCA_ICM_PAGE_SIZE,
                    uaddr & ~PAGE_MASK);

        ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
        if (ret <= 0) {
                /* pci_map_sg() returns 0 entries on failure, never < 0 */
                ret = -ENOMEM;
                put_page(pages[0]);
                goto out;
        }

        ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
                                 mthca_uarc_virt(dev, uar, i), &status);
        if (!ret && status)
                ret = -EINVAL;
        if (ret) {
                pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
                put_page(sg_page(&db_tab->page[i].mem));
                goto out;
        }

        db_tab->page[i].uvirt    = uaddr;
        db_tab->page[i].refcount = 1;

out:
        mutex_unlock(&db_tab->mutex);
        return ret;
}

void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
                         struct mthca_user_db_table *db_tab, int index)
{
        if (!mthca_is_memfree(dev))
                return;

        /*
         * To make our bookkeeping simpler, we don't unmap DB
         * pages until we clean up the whole db table.
         */

        mutex_lock(&db_tab->mutex);

        --db_tab->page[index / MTHCA_DB_REC_PER_PAGE].refcount;

        mutex_unlock(&db_tab->mutex);
}

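/*
 * Lifecycle of a user doorbell page: mthca_map_user_db() pins the
 * page with get_user_pages() and maps it into UARC on first use;
 * later calls for the same page just bump the refcount.
 * mthca_unmap_user_db() only drops the refcount; the actual unmap,
 * DMA unmapping and put_page() happen in mthca_cleanup_user_db_tab().
 */
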
struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
{
        struct mthca_user_db_table *db_tab;
        int npages;
        int i;

        if (!mthca_is_memfree(dev))
                return NULL;

        npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
        db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL);
        if (!db_tab)
                return ERR_PTR(-ENOMEM);

        mutex_init(&db_tab->mutex);
        for (i = 0; i < npages; ++i) {
                db_tab->page[i].refcount = 0;
                db_tab->page[i].uvirt    = 0;
                sg_init_table(&db_tab->page[i].mem, 1);
        }

        return db_tab;
}

void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
                               struct mthca_user_db_table *db_tab)
{
        int i;
        u8 status;

        if (!mthca_is_memfree(dev))
                return;

        for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
                if (db_tab->page[i].uvirt) {
                        mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
                        pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
                        put_page(sg_page(&db_tab->page[i].mem));
                }
        }

        kfree(db_tab);
}

int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
                   u32 qn, __be32 **db)
{
        int group;
        int start, end, dir;
        int i, j;
        struct mthca_db_page *page;
        int ret = 0;
        u8 status;

        mutex_lock(&dev->db_tab->mutex);

        switch (type) {
        case MTHCA_DB_TYPE_CQ_ARM:
        case MTHCA_DB_TYPE_SQ:
                group = 0;
                start = 0;
                end   = dev->db_tab->max_group1;
                dir   = 1;
                break;

        case MTHCA_DB_TYPE_CQ_SET_CI:
        case MTHCA_DB_TYPE_RQ:
        case MTHCA_DB_TYPE_SRQ:
                group = 1;
                start = dev->db_tab->npages - 1;
                end   = dev->db_tab->min_group2;
                dir   = -1;
                break;

        default:
                ret = -EINVAL;
                goto out;
        }

        for (i = start; i != end; i += dir)
                if (dev->db_tab->page[i].db_rec &&
                    !bitmap_full(dev->db_tab->page[i].used,
                                 MTHCA_DB_REC_PER_PAGE)) {
                        page = dev->db_tab->page + i;
                        goto found;
                }

        for (i = start; i != end; i += dir)
                if (!dev->db_tab->page[i].db_rec) {
                        page = dev->db_tab->page + i;
                        goto alloc;
                }

        if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) {
                ret = -ENOMEM;
                goto out;
        }

        if (group == 0)
                ++dev->db_tab->max_group1;
        else
                --dev->db_tab->min_group2;

        page = dev->db_tab->page + end;

alloc:
        page->db_rec = dma_alloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
                                          &page->mapping, GFP_KERNEL);
        if (!page->db_rec) {
                ret = -ENOMEM;
                goto out;
        }
        memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);

        ret = mthca_MAP_ICM_page(dev, page->mapping,
                                 mthca_uarc_virt(dev, &dev->driver_uar, i), &status);
        if (!ret && status)
                ret = -EINVAL;
        if (ret) {
                dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
                                  page->db_rec, page->mapping);
                goto out;
        }

        bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE);

found:
        j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE);
        set_bit(j, page->used);

        if (group == 1)
                j = MTHCA_DB_REC_PER_PAGE - 1 - j;

        ret = i * MTHCA_DB_REC_PER_PAGE + j;

        page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5));

        *db = (__be32 *) &page->db_rec[j];

out:
        mutex_unlock(&dev->db_tab->mutex);

        return ret;
}

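/*
 * Doorbell index encoding, assuming 4 KB ICM pages (so
 * MTHCA_DB_REC_PER_PAGE == 512): db_index = i * 512 + j, where i is
 * the UARC page and j the record slot within it.  CQ arm and SQ
 * doorbells live in group1 pages filled from the front of the table;
 * CQ set_ci, RQ and SRQ doorbells live in group2 pages filled from
 * the back, with the record slot mirrored (j -> 511 - j) relative to
 * the bitmap bit.  mthca_free_db() below undoes that mirroring for
 * pages at or above min_group2.
 */
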
void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
{
        int i, j;
        struct mthca_db_page *page;
        u8 status;

        i = db_index / MTHCA_DB_REC_PER_PAGE;
        j = db_index % MTHCA_DB_REC_PER_PAGE;

        page = dev->db_tab->page + i;

        mutex_lock(&dev->db_tab->mutex);

        page->db_rec[j] = 0;
        if (i >= dev->db_tab->min_group2)
                j = MTHCA_DB_REC_PER_PAGE - 1 - j;
        clear_bit(j, page->used);

        if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) &&
            i >= dev->db_tab->max_group1 - 1) {
                mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);

                dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
                                  page->db_rec, page->mapping);
                page->db_rec = NULL;

                if (i == dev->db_tab->max_group1) {
                        --dev->db_tab->max_group1;
                        /* XXX may be able to unmap more pages now */
                }
                if (i == dev->db_tab->min_group2)
                        ++dev->db_tab->min_group2;
        }

        mutex_unlock(&dev->db_tab->mutex);
}

int mthca_init_db_tab(struct mthca_dev *dev)
{
        int i;

        if (!mthca_is_memfree(dev))
                return 0;

        dev->db_tab = kmalloc(sizeof *dev->db_tab, GFP_KERNEL);
        if (!dev->db_tab)
                return -ENOMEM;

        mutex_init(&dev->db_tab->mutex);

        dev->db_tab->npages     = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
        dev->db_tab->max_group1 = 0;
        dev->db_tab->min_group2 = dev->db_tab->npages - 1;

        dev->db_tab->page = kmalloc(dev->db_tab->npages *
                                    sizeof *dev->db_tab->page,
                                    GFP_KERNEL);
        if (!dev->db_tab->page) {
                kfree(dev->db_tab);
                return -ENOMEM;
        }

        for (i = 0; i < dev->db_tab->npages; ++i)
                dev->db_tab->page[i].db_rec = NULL;

        return 0;
}

void mthca_cleanup_db_tab(struct mthca_dev *dev)
{
        int i;
        u8 status;

        if (!mthca_is_memfree(dev))
                return;

        /*
         * Because we don't always free our UARC pages when they
         * become empty (to keep mthca_free_db() simple), we need to
         * make a sweep through the doorbell pages and free any
         * leftover pages now.
         */
        for (i = 0; i < dev->db_tab->npages; ++i) {
                if (!dev->db_tab->page[i].db_rec)
                        continue;

                if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE))
                        mthca_warn(dev, "Kernel UARC page %d not empty\n", i);

                mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);

                dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
                                  dev->db_tab->page[i].db_rec,
                                  dev->db_tab->page[i].mapping);
        }

        kfree(dev->db_tab->page);
        kfree(dev->db_tab);
}