/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"

static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
	int o;
	int m;
	u32 seg;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o)
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			seg = find_first_bit(buddy->bits[o], m);
			if (seg < m)
				goto found;
		}

	spin_unlock(&buddy->lock);
	return -1;

 found:
	clear_bit(seg, buddy->bits[o]);
	--buddy->num_free[o];

	while (o > order) {
		--o;
		seg <<= 1;
		set_bit(seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	seg <<= order;

	return seg;
}

static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}

static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kcalloc(buddy->max_order + 1, sizeof(long *),
			      GFP_KERNEL);
	buddy->num_free = kcalloc(buddy->max_order + 1, sizeof(*buddy->num_free),
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kvmalloc_array(s, sizeof(long), GFP_KERNEL | __GFP_ZERO);
		if (!buddy->bits[i])
			goto err_out_free;
	}

	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);

	return -ENOMEM;
}

static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
	u32 seg;
	int seg_order;
	u32 offset;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);

	seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
	if (seg == -1)
		return -1;

	offset = seg * (1 << log_mtts_per_seg);

	if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
				 offset + (1 << order) - 1)) {
		mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
		return -1;
	}

	return offset;
}

static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	u64 in_param = 0;
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, order);
		err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
				   RES_OP_RESERVE_AND_MAP,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_WRAPPED);
		if (err)
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_alloc_mtt_range(dev, order);
}

int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt)
{
	int i;

	if (!npages) {
		mtt->order = -1;
		mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
		return 0;
	} else
		mtt->page_shift = page_shift;

	for (mtt->order = 0, i = 1; i < npages; i <<= 1)
		++mtt->order;

	mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
	if (mtt->offset == -1)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);

void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u32 first_seg;
	int seg_order;
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);
	first_seg = offset / (1 << log_mtts_per_seg);

	mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
	mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
			     offset + (1 << order) - 1);
}

static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, offset);
		set_param_h(&in_param, order);
		err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
				  offset, order);
		return;
	}
	__mlx4_free_mtt_range(dev, offset, order);
}

void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	if (mtt->order < 0)
		return;

	mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);

u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);

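/*
 * Descriptive note (added): memory keys are a one-byte left rotation of
 * the MPT hardware index, and key_to_hw_index() is the inverse rotation.
 * mlx4_map_phys_fmr() relies on this to advance an FMR's key while the
 * masked index, key_to_hw_index(key) & (num_mpts - 1), keeps addressing
 * the same MPT entry.
 */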
static u32 hw_index_to_key(u32 ind)
{
	return (ind >> 24) | (ind << 8);
}

static u32 key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd(dev, mailbox->dma, mpt_index,
			0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
			    !mailbox, MLX4_CMD_HW2SW_MPT,
			    MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}

/* Must protect against concurrent access */
int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
		       struct mlx4_mpt_entry ***mpt_entry)
{
	int err;
	int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
	struct mlx4_cmd_mailbox *mailbox = NULL;

	if (mmr->enabled != MLX4_MPT_EN_HW)
		return -EINVAL;

	err = mlx4_HW2SW_MPT(dev, NULL, key);
	if (err) {
		mlx4_warn(dev, "HW2SW_MPT failed (%d).", err);
		mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n");
		return err;
	}

	mmr->enabled = MLX4_MPT_EN_SW;

	if (!mlx4_is_mfunc(dev)) {
		**mpt_entry = mlx4_table_find(
				&mlx4_priv(dev)->mr_table.dmpt_table,
				key, NULL);
	} else {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);

		err = mlx4_cmd_box(dev, 0, mailbox->dma, key,
				   0, MLX4_CMD_QUERY_MPT,
				   MLX4_CMD_TIME_CLASS_B,
				   MLX4_CMD_WRAPPED);
		if (err)
			goto free_mailbox;

		*mpt_entry = (struct mlx4_mpt_entry **)&mailbox->buf;
	}

	if (!(*mpt_entry) || !(**mpt_entry)) {
		err = -ENOMEM;
		goto free_mailbox;
	}

	return 0;

free_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_get_mpt);

int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
			 struct mlx4_mpt_entry **mpt_entry)
{
	int err;

	if (!mlx4_is_mfunc(dev)) {
		/* Make sure any changes to this entry are flushed */
		wmb();

		*(u8 *)(*mpt_entry) = MLX4_MPT_STATUS_HW;

		/* Make sure the new status is written */
		wmb();

		err = mlx4_SYNC_TPT(dev);
	} else {
		int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);

		struct mlx4_cmd_mailbox *mailbox =
			container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
				     buf);

		err = mlx4_SW2HW_MPT(dev, mailbox, key);
	}

	if (!err) {
		mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
		mmr->enabled = MLX4_MPT_EN_HW;
	}
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt);

void mlx4_mr_hw_put_mpt(struct mlx4_dev *dev,
			struct mlx4_mpt_entry **mpt_entry)
{
	if (mlx4_is_mfunc(dev)) {
		struct mlx4_cmd_mailbox *mailbox =
			container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
				     buf);
		mlx4_free_cmd_mailbox(dev, mailbox);
	}
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt);

int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
			 u32 pdn)
{
	u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK;
	/* The wrapper function will put the slave's id here */
	if (mlx4_is_mfunc(dev))
		pd_flags &= ~MLX4_MPT_PD_VF_MASK;

	mpt_entry->pd_flags = cpu_to_be32(pd_flags |
					  (pdn & MLX4_MPT_PD_MASK) |
					  MLX4_MPT_PD_FLAG_EN_INV);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_pd);

int mlx4_mr_hw_change_access(struct mlx4_dev *dev,
			     struct mlx4_mpt_entry *mpt_entry,
			     u32 access)
{
	u32 flags = (be32_to_cpu(mpt_entry->flags) & ~MLX4_PERM_MASK) |
		    (access & MLX4_PERM_MASK);

	mpt_entry->flags = cpu_to_be32(flags);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_access);

static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
				  u64 iova, u64 size, u32 access, int npages,
				  int page_shift, struct mlx4_mr *mr)
{
	mr->iova = iova;
	mr->size = size;
	mr->pd = pd;
	mr->access = access;
	mr->enabled = MLX4_MPT_DISABLED;
	mr->key = hw_index_to_key(mridx);

	return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
}

static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
			  struct mlx4_cmd_mailbox *mailbox,
			  int num_entries)
{
	return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

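/*
 * Descriptive note (added): MPT/MTT bookkeeping comes in two flavours
 * below. The __mlx4_* helpers act directly on the resource bitmaps and
 * ICM tables, while their wrappers forward the request to the master
 * through the command interface when the device is multi-function
 * (SR-IOV).
 */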
int __mlx4_mpt_reserve(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
}

static int mlx4_mpt_reserve(struct mlx4_dev *dev)
{
	u64 out_param;

	if (mlx4_is_mfunc(dev)) {
		if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
				 MLX4_CMD_ALLOC_RES,
				 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_mpt_reserve(dev);
}

void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index, MLX4_NO_RR);
}

static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
			     MLX4_CMD_FREE_RES,
			     MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to release mr index:%d\n",
				  index);
		return;
	}
	__mlx4_mpt_release(dev, index);
}

int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	return mlx4_table_get(dev, &mr_table->dmpt_table, index);
}

static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	u64 param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, index);
		return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES,
				    MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_mpt_alloc_icm(dev, index);
}

void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	mlx4_table_put(dev, &mr_table->dmpt_table, index);
}

static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
				  index);
		return;
	}
	return __mlx4_mpt_free_icm(dev, index);
}

int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr)
{
	u32 index;
	int err;

	index = mlx4_mpt_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
				     access, npages, page_shift, mr);
	if (err)
		mlx4_mpt_release(dev, index);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);

static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int err;

	if (mr->enabled == MLX4_MPT_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mr->key) &
				     (dev->caps.num_mpts - 1));
		if (err) {
			mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
				  err);
			return err;
		}

		mr->enabled = MLX4_MPT_EN_SW;
	}
	mlx4_mtt_cleanup(dev, &mr->mtt);

	return 0;
}

int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int ret;

	ret = mlx4_mr_free_reserved(dev, mr);
	if (ret)
		return ret;
	if (mr->enabled)
		mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
	mlx4_mpt_release(dev, key_to_hw_index(mr->key));

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);

void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	mlx4_mtt_cleanup(dev, &mr->mtt);
	mr->mtt.order = -1;
}
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup);

int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
			    u64 iova, u64 size, int npages,
			    int page_shift, struct mlx4_mpt_entry *mpt_entry)
{
	int err;

	err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
	if (err)
		return err;

	mpt_entry->start = cpu_to_be64(iova);
	mpt_entry->length = cpu_to_be64(size);
	mpt_entry->entity_size = cpu_to_be32(page_shift);
	mpt_entry->flags &= ~(cpu_to_be32(MLX4_MPT_FLAG_FREE |
					  MLX4_MPT_FLAG_SW_OWNS));
	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
	} else {
		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
		if (mr->mtt.page_shift == 0)
			mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
	}
	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
	} else {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	}
	mr->enabled = MLX4_MPT_EN_SW;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_write);

int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key));
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;
	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO |
				       MLX4_MPT_FLAG_REGION |
				       mr->access);

	mpt_entry->key = cpu_to_be32(key_to_hw_index(mr->key));
	mpt_entry->pd_flags = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
	mpt_entry->start = cpu_to_be64(mr->iova);
	mpt_entry->length = cpu_to_be64(mr->size);
	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);

	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
	} else {
		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
	}

	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
		mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
	} else {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mr->enabled = MLX4_MPT_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);

static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
				int start_index, int npages, u64 *page_list)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	__be64 *mtts;
	dma_addr_t dma_handle;
	int i;

	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
			       start_index, &dma_handle);

	if (!mtts)
		return -ENOMEM;

	dma_sync_single_for_cpu(&dev->persist->pdev->dev, dma_handle,
				npages * sizeof(u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->persist->pdev->dev, dma_handle,
				   npages * sizeof(u64), DMA_TO_DEVICE);

	return 0;
}

int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     int start_index, int npages, u64 *page_list)
{
	int err = 0;
	int chunk;
	int mtts_per_page;
	int max_mtts_first_page;

	/* compute how many MTTs fit in the first page */
	mtts_per_page = PAGE_SIZE / sizeof(u64);
	max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
			      % mtts_per_page;

	chunk = min_t(int, max_mtts_first_page, npages);

	while (npages > 0) {
		err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
		if (err)
			return err;
		npages -= chunk;
		start_index += chunk;
		page_list += chunk;

		chunk = min_t(int, mtts_per_page, npages);
	}
	return err;
}

int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list)
{
	struct mlx4_cmd_mailbox *mailbox = NULL;
	__be64 *inbox = NULL;
	int chunk;
	int err = 0;
	int i;

	if (mtt->order < 0)
		return -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		while (npages > 0) {
			chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
				      npages);
			inbox[0] = cpu_to_be64(mtt->offset + start_index);
			inbox[1] = 0;
			for (i = 0; i < chunk; ++i)
				inbox[i + 2] = cpu_to_be64(page_list[i] |
							   MLX4_MTT_FLAG_PRESENT);
			err = mlx4_WRITE_MTT(dev, mailbox, chunk);
			if (err) {
				mlx4_free_cmd_mailbox(dev, mailbox);
				return err;
			}

			npages -= chunk;
			start_index += chunk;
			page_list += chunk;
		}
		mlx4_free_cmd_mailbox(dev, mailbox);
		return err;
	}

	return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);

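/*
 * Descriptive note (added): populate an MTT with the DMA addresses of an
 * mlx4_buf, whether the buffer was allocated as one contiguous chunk or
 * as a list of pages.
 */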
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf)
{
	u64 *page_list;
	int err;
	int i;

	page_list = kcalloc(buf->npages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i)
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;

	err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);

int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
		  struct mlx4_mw *mw)
{
	u32 index;

	if ((type == MLX4_MW_TYPE_1 &&
	     !(dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)) ||
	    (type == MLX4_MW_TYPE_2 &&
	     !(dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)))
		return -EOPNOTSUPP;

	index = mlx4_mpt_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	mw->key = hw_index_to_key(index);
	mw->pd = pd;
	mw->type = type;
	mw->enabled = MLX4_MPT_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mw_alloc);

int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key));
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;

	/* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
	 * off, thus creating a memory window and not a memory region.
	 */
	mpt_entry->key = cpu_to_be32(key_to_hw_index(mw->key));
	mpt_entry->pd_flags = cpu_to_be32(mw->pd);
	if (mw->type == MLX4_MW_TYPE_2) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->qpn = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_EN_INV);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mw->key) &
			     (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mw->enabled = MLX4_MPT_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mw_enable);

void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
	int err;

	if (mw->enabled == MLX4_MPT_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mw->key) &
				     (dev->caps.num_mpts - 1));
		if (err)
			mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);

		mw->enabled = MLX4_MPT_EN_SW;
	}
	if (mw->enabled)
		mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
	mlx4_mpt_release(dev, key_to_hw_index(mw->key));
}
EXPORT_SYMBOL_GPL(mlx4_mw_free);

int mlx4_init_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;
	int err;

	/* Nothing to do for slaves - all MR handling is forwarded
	 * to the master */
	if (mlx4_is_slave(dev))
		return 0;

	if (!is_power_of_2(dev->caps.num_mpts))
		return -EINVAL;

	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
			       ~0, dev->caps.reserved_mrws, 0);
	if (err)
		return err;

	err = mlx4_buddy_init(&mr_table->mtt_buddy,
			      ilog2((u32)dev->caps.num_mtts /
				    (1 << log_mtts_per_seg)));
	if (err)
		goto err_buddy;

	if (dev->caps.reserved_mtts) {
		priv->reserved_mtts =
			mlx4_alloc_mtt_range(dev,
					     fls(dev->caps.reserved_mtts - 1));
		if (priv->reserved_mtts < 0) {
			mlx4_warn(dev, "MTT table of order %u is too small\n",
				  mr_table->mtt_buddy.max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	}

	return 0;

err_reserve_mtts:
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);

	return err;
}

void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;

	if (mlx4_is_slave(dev))
		return;
	if (priv->reserved_mtts >= 0)
		mlx4_free_mtt_range(dev, priv->reserved_mtts,
				    fls(dev->caps.reserved_mtts - 1));
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}

static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
				 int npages, u64 iova)
{
	int i, page_mask;

	if (npages > fmr->max_pages)
		return -EINVAL;

	page_mask = (1 << fmr->page_shift) - 1;

	/* We are getting page lists, so va must be page aligned. */
	if (iova & page_mask)
		return -EINVAL;

	/* Trust the user not to pass misaligned data in page_list */
	if (0)
		for (i = 0; i < npages; ++i) {
			if (page_list[i] & ~page_mask)
				return -EINVAL;
		}

	if (fmr->maps >= fmr->max_maps)
		return -EINVAL;

	return 0;
}

int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey)
{
	u32 key;
	int i, err;

	err = mlx4_check_fmr(fmr, page_list, npages, iova);
	if (err)
		return err;

	++fmr->maps;

	key = key_to_hw_index(fmr->mr.key);
	key += dev->caps.num_mpts;
	*lkey = *rkey = fmr->mr.key = hw_index_to_key(key);

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;

	/* Make sure MPT status is visible before writing MTT entries */
	wmb();

	dma_sync_single_for_cpu(&dev->persist->pdev->dev, fmr->dma_handle,
				npages * sizeof(u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->persist->pdev->dev, fmr->dma_handle,
				   npages * sizeof(u64), DMA_TO_DEVICE);

	fmr->mpt->key = cpu_to_be32(key);
	fmr->mpt->lkey = cpu_to_be32(key);
	fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
	fmr->mpt->start = cpu_to_be64(iova);

	/* Make sure MTT entries are visible before setting MPT status */
	wmb();

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;

	/* Make sure MPT status is visible before consumer can use FMR */
	wmb();

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);

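/*
 * Descriptive note (added): FMRs map and unmap without firmware commands.
 * The driver keeps direct pointers to the MPT entry and its MTTs in ICM,
 * flips the MPT between SW and HW ownership, and rewrites the MTT entries
 * in place.
 */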
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err = -ENOMEM;

	if (max_maps > dev->caps.max_fmr_maps)
		return -EINVAL;

	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
		return -EINVAL;

	/* All MTTs must fit in the same page */
	if (max_pages * sizeof(*fmr->mtts) > PAGE_SIZE)
		return -EINVAL;

	fmr->page_shift = page_shift;
	fmr->max_pages = max_pages;
	fmr->max_maps = max_maps;
	fmr->maps = 0;

	err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
			    page_shift, &fmr->mr);
	if (err)
		return err;

	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
				    fmr->mr.mtt.offset,
				    &fmr->dma_handle);

	if (!fmr->mtts) {
		err = -ENOMEM;
		goto err_free;
	}

	return 0;

err_free:
	(void) mlx4_mr_free(dev, &fmr->mr);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);

int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_mr_enable(dev, &fmr->mr);
	if (err)
		return err;

	fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
				   key_to_hw_index(fmr->mr.key), NULL);
	if (!fmr->mpt)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_enable);

void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey)
{
	if (!fmr->maps)
		return;

	/* To unmap: it is sufficient to take back ownership from HW */
	*(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;

	/* Make sure MPT status is visible */
	wmb();

	fmr->maps = 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);

int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	int ret;

	if (fmr->maps)
		return -EBUSY;
	if (fmr->mr.enabled == MLX4_MPT_EN_HW) {
		/* In case the FMR was enabled and then unmapped, make sure to
		 * give ownership of the MPT back to HW so that the HW2SW_MPT
		 * command will succeed.
		 */
		*(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
		/* Make sure MPT status is visible before changing MPT fields */
		wmb();
		fmr->mpt->length = 0;
		fmr->mpt->start = 0;
		/* Make sure MPT data is visible after changing MPT status */
		wmb();
		*(u8 *)fmr->mpt = MLX4_MPT_STATUS_HW;
		/* Make sure MPT status is visible */
		wmb();
	}

	ret = mlx4_mr_free(dev, &fmr->mr);
	if (ret)
		return ret;
	fmr->mr.enabled = MLX4_MPT_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);

int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);