1 /* 2 * Copyright (c) 2016 Hisilicon Limited. 3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. 4 * 5 * This software is available to you under a choice of one of two 6 * licenses. You may choose to be licensed under the terms of the GNU 7 * General Public License (GPL) Version 2, available from the file 8 * COPYING in the main directory of this source tree, or the 9 * OpenIB.org BSD license below: 10 * 11 * Redistribution and use in source and binary forms, with or 12 * without modification, are permitted provided that the following 13 * conditions are met: 14 * 15 * - Redistributions of source code must retain the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer. 18 * 19 * - Redistributions in binary form must reproduce the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer in the documentation and/or other materials 22 * provided with the distribution. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * SOFTWARE. 32 */ 33 34 #include <linux/platform_device.h> 35 #include "hns_roce_device.h" 36 #include "hns_roce_hem.h" 37 #include "hns_roce_common.h" 38 39 #define DMA_ADDR_T_SHIFT 12 40 #define BT_BA_SHIFT 32 41 42 #define HEM_INDEX_BUF BIT(0) 43 #define HEM_INDEX_L0 BIT(1) 44 #define HEM_INDEX_L1 BIT(2) 45 struct hns_roce_hem_index { 46 u64 buf; 47 u64 l0; 48 u64 l1; 49 u32 inited; /* indicate which index is available */ 50 }; 51 52 bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type) 53 { 54 int hop_num = 0; 55 56 switch (type) { 57 case HEM_TYPE_QPC: 58 hop_num = hr_dev->caps.qpc_hop_num; 59 break; 60 case HEM_TYPE_MTPT: 61 hop_num = hr_dev->caps.mpt_hop_num; 62 break; 63 case HEM_TYPE_CQC: 64 hop_num = hr_dev->caps.cqc_hop_num; 65 break; 66 case HEM_TYPE_SRQC: 67 hop_num = hr_dev->caps.srqc_hop_num; 68 break; 69 case HEM_TYPE_SCCC: 70 hop_num = hr_dev->caps.sccc_hop_num; 71 break; 72 case HEM_TYPE_QPC_TIMER: 73 hop_num = hr_dev->caps.qpc_timer_hop_num; 74 break; 75 case HEM_TYPE_CQC_TIMER: 76 hop_num = hr_dev->caps.cqc_timer_hop_num; 77 break; 78 case HEM_TYPE_GMV: 79 hop_num = hr_dev->caps.gmv_hop_num; 80 break; 81 default: 82 return false; 83 } 84 85 return hop_num ? true : false; 86 } 87 88 static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 hem_idx, 89 u32 bt_chunk_num, u64 hem_max_num) 90 { 91 u64 start_idx = round_down(hem_idx, bt_chunk_num); 92 u64 check_max_num = start_idx + bt_chunk_num; 93 u64 i; 94 95 for (i = start_idx; (i < check_max_num) && (i < hem_max_num); i++) 96 if (i != hem_idx && hem[i]) 97 return false; 98 99 return true; 100 } 101 102 static bool hns_roce_check_bt_null(u64 **bt, u64 ba_idx, u32 bt_chunk_num) 103 { 104 u64 start_idx = round_down(ba_idx, bt_chunk_num); 105 int i; 106 107 for (i = 0; i < bt_chunk_num; i++) 108 if (i != ba_idx && bt[start_idx + i]) 109 return false; 110 111 return true; 112 } 113 114 static int hns_roce_get_bt_num(u32 table_type, u32 hop_num) 115 { 116 if (check_whether_bt_num_3(table_type, hop_num)) 117 return 3; 118 else if (check_whether_bt_num_2(table_type, hop_num)) 119 return 2; 120 else if (check_whether_bt_num_1(table_type, hop_num)) 121 return 1; 122 else 123 return 0; 124 } 125 126 static int get_hem_table_config(struct hns_roce_dev *hr_dev, 127 struct hns_roce_hem_mhop *mhop, 128 u32 type) 129 { 130 struct device *dev = hr_dev->dev; 131 132 switch (type) { 133 case HEM_TYPE_QPC: 134 mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz 135 + PAGE_SHIFT); 136 mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz 137 + PAGE_SHIFT); 138 mhop->ba_l0_num = hr_dev->caps.qpc_bt_num; 139 mhop->hop_num = hr_dev->caps.qpc_hop_num; 140 break; 141 case HEM_TYPE_MTPT: 142 mhop->buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz 143 + PAGE_SHIFT); 144 mhop->bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz 145 + PAGE_SHIFT); 146 mhop->ba_l0_num = hr_dev->caps.mpt_bt_num; 147 mhop->hop_num = hr_dev->caps.mpt_hop_num; 148 break; 149 case HEM_TYPE_CQC: 150 mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz 151 + PAGE_SHIFT); 152 mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz 153 + PAGE_SHIFT); 154 mhop->ba_l0_num = hr_dev->caps.cqc_bt_num; 155 mhop->hop_num = hr_dev->caps.cqc_hop_num; 156 break; 157 case HEM_TYPE_SCCC: 158 mhop->buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz 159 + PAGE_SHIFT); 160 mhop->bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz 161 + PAGE_SHIFT); 162 mhop->ba_l0_num = hr_dev->caps.sccc_bt_num; 163 mhop->hop_num = hr_dev->caps.sccc_hop_num; 164 break; 165 case HEM_TYPE_QPC_TIMER: 166 mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz 167 + PAGE_SHIFT); 168 mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz 169 + PAGE_SHIFT); 170 mhop->ba_l0_num = hr_dev->caps.qpc_timer_bt_num; 171 mhop->hop_num = hr_dev->caps.qpc_timer_hop_num; 172 break; 173 case HEM_TYPE_CQC_TIMER: 174 mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz 175 + PAGE_SHIFT); 176 mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz 177 + PAGE_SHIFT); 178 mhop->ba_l0_num = hr_dev->caps.cqc_timer_bt_num; 179 mhop->hop_num = hr_dev->caps.cqc_timer_hop_num; 180 break; 181 case HEM_TYPE_SRQC: 182 mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz 183 + PAGE_SHIFT); 184 mhop->bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz 185 + PAGE_SHIFT); 186 mhop->ba_l0_num = hr_dev->caps.srqc_bt_num; 187 mhop->hop_num = hr_dev->caps.srqc_hop_num; 188 break; 189 case HEM_TYPE_GMV: 190 mhop->buf_chunk_size = 1 << (hr_dev->caps.gmv_buf_pg_sz + 191 PAGE_SHIFT); 192 mhop->bt_chunk_size = 1 << (hr_dev->caps.gmv_ba_pg_sz + 193 PAGE_SHIFT); 194 mhop->ba_l0_num = hr_dev->caps.gmv_bt_num; 195 mhop->hop_num = hr_dev->caps.gmv_hop_num; 196 break; 197 default: 198 dev_err(dev, "table %u not support multi-hop addressing!\n", 199 type); 200 return -EINVAL; 201 } 202 203 return 0; 204 } 205 206 int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev, 207 struct hns_roce_hem_table *table, unsigned long *obj, 208 struct hns_roce_hem_mhop *mhop) 209 { 210 struct device *dev = hr_dev->dev; 211 u32 chunk_ba_num; 212 u32 chunk_size; 213 u32 table_idx; 214 u32 bt_num; 215 216 if (get_hem_table_config(hr_dev, mhop, table->type)) 217 return -EINVAL; 218 219 if (!obj) 220 return 0; 221 222 /* 223 * QPC/MTPT/CQC/SRQC/SCCC alloc hem for buffer pages. 224 * MTT/CQE alloc hem for bt pages. 225 */ 226 bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num); 227 chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN; 228 chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : 229 mhop->bt_chunk_size; 230 table_idx = (*obj & (table->num_obj - 1)) / 231 (chunk_size / table->obj_size); 232 switch (bt_num) { 233 case 3: 234 mhop->l2_idx = table_idx & (chunk_ba_num - 1); 235 mhop->l1_idx = table_idx / chunk_ba_num & (chunk_ba_num - 1); 236 mhop->l0_idx = (table_idx / chunk_ba_num) / chunk_ba_num; 237 break; 238 case 2: 239 mhop->l1_idx = table_idx & (chunk_ba_num - 1); 240 mhop->l0_idx = table_idx / chunk_ba_num; 241 break; 242 case 1: 243 mhop->l0_idx = table_idx; 244 break; 245 default: 246 dev_err(dev, "table %u not support hop_num = %u!\n", 247 table->type, mhop->hop_num); 248 return -EINVAL; 249 } 250 if (mhop->l0_idx >= mhop->ba_l0_num) 251 mhop->l0_idx %= mhop->ba_l0_num; 252 253 return 0; 254 } 255 256 static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, 257 int npages, 258 unsigned long hem_alloc_size, 259 gfp_t gfp_mask) 260 { 261 struct hns_roce_hem_chunk *chunk = NULL; 262 struct hns_roce_hem *hem; 263 struct scatterlist *mem; 264 int order; 265 void *buf; 266 267 WARN_ON(gfp_mask & __GFP_HIGHMEM); 268 269 hem = kmalloc(sizeof(*hem), 270 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); 271 if (!hem) 272 return NULL; 273 274 hem->refcount = 0; 275 INIT_LIST_HEAD(&hem->chunk_list); 276 277 order = get_order(hem_alloc_size); 278 279 while (npages > 0) { 280 if (!chunk) { 281 chunk = kmalloc(sizeof(*chunk), 282 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); 283 if (!chunk) 284 goto fail; 285 286 sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN); 287 chunk->npages = 0; 288 chunk->nsg = 0; 289 memset(chunk->buf, 0, sizeof(chunk->buf)); 290 list_add_tail(&chunk->list, &hem->chunk_list); 291 } 292 293 while (1 << order > npages) 294 --order; 295 296 /* 297 * Alloc memory one time. If failed, don't alloc small block 298 * memory, directly return fail. 299 */ 300 mem = &chunk->mem[chunk->npages]; 301 buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order, 302 &sg_dma_address(mem), gfp_mask); 303 if (!buf) 304 goto fail; 305 306 chunk->buf[chunk->npages] = buf; 307 sg_dma_len(mem) = PAGE_SIZE << order; 308 309 ++chunk->npages; 310 ++chunk->nsg; 311 npages -= 1 << order; 312 } 313 314 return hem; 315 316 fail: 317 hns_roce_free_hem(hr_dev, hem); 318 return NULL; 319 } 320 321 void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem) 322 { 323 struct hns_roce_hem_chunk *chunk, *tmp; 324 int i; 325 326 if (!hem) 327 return; 328 329 list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) { 330 for (i = 0; i < chunk->npages; ++i) 331 dma_free_coherent(hr_dev->dev, 332 sg_dma_len(&chunk->mem[i]), 333 chunk->buf[i], 334 sg_dma_address(&chunk->mem[i])); 335 kfree(chunk); 336 } 337 338 kfree(hem); 339 } 340 341 static int hns_roce_set_hem(struct hns_roce_dev *hr_dev, 342 struct hns_roce_hem_table *table, unsigned long obj) 343 { 344 spinlock_t *lock = &hr_dev->bt_cmd_lock; 345 struct device *dev = hr_dev->dev; 346 struct hns_roce_hem_iter iter; 347 void __iomem *bt_cmd; 348 __le32 bt_cmd_val[2]; 349 __le32 bt_cmd_h = 0; 350 unsigned long flags; 351 __le32 bt_cmd_l; 352 int ret = 0; 353 u64 bt_ba; 354 long end; 355 356 /* Find the HEM(Hardware Entry Memory) entry */ 357 unsigned long i = (obj & (table->num_obj - 1)) / 358 (table->table_chunk_size / table->obj_size); 359 360 switch (table->type) { 361 case HEM_TYPE_QPC: 362 case HEM_TYPE_MTPT: 363 case HEM_TYPE_CQC: 364 case HEM_TYPE_SRQC: 365 roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, 366 ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type); 367 break; 368 default: 369 return ret; 370 } 371 372 roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M, 373 ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj); 374 roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0); 375 roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1); 376 377 /* Currently iter only a chunk */ 378 for (hns_roce_hem_first(table->hem[i], &iter); 379 !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) { 380 bt_ba = hns_roce_hem_addr(&iter) >> DMA_ADDR_T_SHIFT; 381 382 spin_lock_irqsave(lock, flags); 383 384 bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; 385 386 end = HW_SYNC_TIMEOUT_MSECS; 387 while (end > 0) { 388 if (!(readl(bt_cmd) >> BT_CMD_SYNC_SHIFT)) 389 break; 390 391 mdelay(HW_SYNC_SLEEP_TIME_INTERVAL); 392 end -= HW_SYNC_SLEEP_TIME_INTERVAL; 393 } 394 395 if (end <= 0) { 396 dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n"); 397 spin_unlock_irqrestore(lock, flags); 398 return -EBUSY; 399 } 400 401 bt_cmd_l = cpu_to_le32(bt_ba); 402 roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, 403 ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, 404 bt_ba >> BT_BA_SHIFT); 405 406 bt_cmd_val[0] = bt_cmd_l; 407 bt_cmd_val[1] = bt_cmd_h; 408 hns_roce_write64_k(bt_cmd_val, 409 hr_dev->reg_base + ROCEE_BT_CMD_L_REG); 410 spin_unlock_irqrestore(lock, flags); 411 } 412 413 return ret; 414 } 415 416 static int calc_hem_config(struct hns_roce_dev *hr_dev, 417 struct hns_roce_hem_table *table, unsigned long obj, 418 struct hns_roce_hem_mhop *mhop, 419 struct hns_roce_hem_index *index) 420 { 421 struct ib_device *ibdev = &hr_dev->ib_dev; 422 unsigned long mhop_obj = obj; 423 u32 l0_idx, l1_idx, l2_idx; 424 u32 chunk_ba_num; 425 u32 bt_num; 426 int ret; 427 428 ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, mhop); 429 if (ret) 430 return ret; 431 432 l0_idx = mhop->l0_idx; 433 l1_idx = mhop->l1_idx; 434 l2_idx = mhop->l2_idx; 435 chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN; 436 bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num); 437 switch (bt_num) { 438 case 3: 439 index->l1 = l0_idx * chunk_ba_num + l1_idx; 440 index->l0 = l0_idx; 441 index->buf = l0_idx * chunk_ba_num * chunk_ba_num + 442 l1_idx * chunk_ba_num + l2_idx; 443 break; 444 case 2: 445 index->l0 = l0_idx; 446 index->buf = l0_idx * chunk_ba_num + l1_idx; 447 break; 448 case 1: 449 index->buf = l0_idx; 450 break; 451 default: 452 ibdev_err(ibdev, "table %u not support mhop.hop_num = %u!\n", 453 table->type, mhop->hop_num); 454 return -EINVAL; 455 } 456 457 if (unlikely(index->buf >= table->num_hem)) { 458 ibdev_err(ibdev, "table %u exceed hem limt idx %llu, max %lu!\n", 459 table->type, index->buf, table->num_hem); 460 return -EINVAL; 461 } 462 463 return 0; 464 } 465 466 static void free_mhop_hem(struct hns_roce_dev *hr_dev, 467 struct hns_roce_hem_table *table, 468 struct hns_roce_hem_mhop *mhop, 469 struct hns_roce_hem_index *index) 470 { 471 u32 bt_size = mhop->bt_chunk_size; 472 struct device *dev = hr_dev->dev; 473 474 if (index->inited & HEM_INDEX_BUF) { 475 hns_roce_free_hem(hr_dev, table->hem[index->buf]); 476 table->hem[index->buf] = NULL; 477 } 478 479 if (index->inited & HEM_INDEX_L1) { 480 dma_free_coherent(dev, bt_size, table->bt_l1[index->l1], 481 table->bt_l1_dma_addr[index->l1]); 482 table->bt_l1[index->l1] = NULL; 483 } 484 485 if (index->inited & HEM_INDEX_L0) { 486 dma_free_coherent(dev, bt_size, table->bt_l0[index->l0], 487 table->bt_l0_dma_addr[index->l0]); 488 table->bt_l0[index->l0] = NULL; 489 } 490 } 491 492 static int alloc_mhop_hem(struct hns_roce_dev *hr_dev, 493 struct hns_roce_hem_table *table, 494 struct hns_roce_hem_mhop *mhop, 495 struct hns_roce_hem_index *index) 496 { 497 u32 bt_size = mhop->bt_chunk_size; 498 struct device *dev = hr_dev->dev; 499 struct hns_roce_hem_iter iter; 500 gfp_t flag; 501 u64 bt_ba; 502 u32 size; 503 int ret; 504 505 /* alloc L1 BA's chunk */ 506 if ((check_whether_bt_num_3(table->type, mhop->hop_num) || 507 check_whether_bt_num_2(table->type, mhop->hop_num)) && 508 !table->bt_l0[index->l0]) { 509 table->bt_l0[index->l0] = dma_alloc_coherent(dev, bt_size, 510 &table->bt_l0_dma_addr[index->l0], 511 GFP_KERNEL); 512 if (!table->bt_l0[index->l0]) { 513 ret = -ENOMEM; 514 goto out; 515 } 516 index->inited |= HEM_INDEX_L0; 517 } 518 519 /* alloc L2 BA's chunk */ 520 if (check_whether_bt_num_3(table->type, mhop->hop_num) && 521 !table->bt_l1[index->l1]) { 522 table->bt_l1[index->l1] = dma_alloc_coherent(dev, bt_size, 523 &table->bt_l1_dma_addr[index->l1], 524 GFP_KERNEL); 525 if (!table->bt_l1[index->l1]) { 526 ret = -ENOMEM; 527 goto err_alloc_hem; 528 } 529 index->inited |= HEM_INDEX_L1; 530 *(table->bt_l0[index->l0] + mhop->l1_idx) = 531 table->bt_l1_dma_addr[index->l1]; 532 } 533 534 /* 535 * alloc buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC. 536 * alloc bt space chunk for MTT/CQE. 537 */ 538 size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size; 539 flag = (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) | __GFP_NOWARN; 540 table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size >> PAGE_SHIFT, 541 size, flag); 542 if (!table->hem[index->buf]) { 543 ret = -ENOMEM; 544 goto err_alloc_hem; 545 } 546 547 index->inited |= HEM_INDEX_BUF; 548 hns_roce_hem_first(table->hem[index->buf], &iter); 549 bt_ba = hns_roce_hem_addr(&iter); 550 if (table->type < HEM_TYPE_MTT) { 551 if (mhop->hop_num == 2) 552 *(table->bt_l1[index->l1] + mhop->l2_idx) = bt_ba; 553 else if (mhop->hop_num == 1) 554 *(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba; 555 } else if (mhop->hop_num == 2) { 556 *(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba; 557 } 558 559 return 0; 560 err_alloc_hem: 561 free_mhop_hem(hr_dev, table, mhop, index); 562 out: 563 return ret; 564 } 565 566 static int set_mhop_hem(struct hns_roce_dev *hr_dev, 567 struct hns_roce_hem_table *table, unsigned long obj, 568 struct hns_roce_hem_mhop *mhop, 569 struct hns_roce_hem_index *index) 570 { 571 struct ib_device *ibdev = &hr_dev->ib_dev; 572 int step_idx; 573 int ret = 0; 574 575 if (index->inited & HEM_INDEX_L0) { 576 ret = hr_dev->hw->set_hem(hr_dev, table, obj, 0); 577 if (ret) { 578 ibdev_err(ibdev, "set HEM step 0 failed!\n"); 579 goto out; 580 } 581 } 582 583 if (index->inited & HEM_INDEX_L1) { 584 ret = hr_dev->hw->set_hem(hr_dev, table, obj, 1); 585 if (ret) { 586 ibdev_err(ibdev, "set HEM step 1 failed!\n"); 587 goto out; 588 } 589 } 590 591 if (index->inited & HEM_INDEX_BUF) { 592 if (mhop->hop_num == HNS_ROCE_HOP_NUM_0) 593 step_idx = 0; 594 else 595 step_idx = mhop->hop_num; 596 ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx); 597 if (ret) 598 ibdev_err(ibdev, "set HEM step last failed!\n"); 599 } 600 out: 601 return ret; 602 } 603 604 static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev, 605 struct hns_roce_hem_table *table, 606 unsigned long obj) 607 { 608 struct ib_device *ibdev = &hr_dev->ib_dev; 609 struct hns_roce_hem_index index = {}; 610 struct hns_roce_hem_mhop mhop = {}; 611 int ret; 612 613 ret = calc_hem_config(hr_dev, table, obj, &mhop, &index); 614 if (ret) { 615 ibdev_err(ibdev, "calc hem config failed!\n"); 616 return ret; 617 } 618 619 mutex_lock(&table->mutex); 620 if (table->hem[index.buf]) { 621 ++table->hem[index.buf]->refcount; 622 goto out; 623 } 624 625 ret = alloc_mhop_hem(hr_dev, table, &mhop, &index); 626 if (ret) { 627 ibdev_err(ibdev, "alloc mhop hem failed!\n"); 628 goto out; 629 } 630 631 /* set HEM base address to hardware */ 632 if (table->type < HEM_TYPE_MTT) { 633 ret = set_mhop_hem(hr_dev, table, obj, &mhop, &index); 634 if (ret) { 635 ibdev_err(ibdev, "set HEM address to HW failed!\n"); 636 goto err_alloc; 637 } 638 } 639 640 ++table->hem[index.buf]->refcount; 641 goto out; 642 643 err_alloc: 644 free_mhop_hem(hr_dev, table, &mhop, &index); 645 out: 646 mutex_unlock(&table->mutex); 647 return ret; 648 } 649 650 int hns_roce_table_get(struct hns_roce_dev *hr_dev, 651 struct hns_roce_hem_table *table, unsigned long obj) 652 { 653 struct device *dev = hr_dev->dev; 654 unsigned long i; 655 int ret = 0; 656 657 if (hns_roce_check_whether_mhop(hr_dev, table->type)) 658 return hns_roce_table_mhop_get(hr_dev, table, obj); 659 660 i = (obj & (table->num_obj - 1)) / (table->table_chunk_size / 661 table->obj_size); 662 663 mutex_lock(&table->mutex); 664 665 if (table->hem[i]) { 666 ++table->hem[i]->refcount; 667 goto out; 668 } 669 670 table->hem[i] = hns_roce_alloc_hem(hr_dev, 671 table->table_chunk_size >> PAGE_SHIFT, 672 table->table_chunk_size, 673 (table->lowmem ? GFP_KERNEL : 674 GFP_HIGHUSER) | __GFP_NOWARN); 675 if (!table->hem[i]) { 676 ret = -ENOMEM; 677 goto out; 678 } 679 680 /* Set HEM base address(128K/page, pa) to Hardware */ 681 if (hns_roce_set_hem(hr_dev, table, obj)) { 682 hns_roce_free_hem(hr_dev, table->hem[i]); 683 table->hem[i] = NULL; 684 ret = -ENODEV; 685 dev_err(dev, "set HEM base address to HW failed.\n"); 686 goto out; 687 } 688 689 ++table->hem[i]->refcount; 690 out: 691 mutex_unlock(&table->mutex); 692 return ret; 693 } 694 695 static void clear_mhop_hem(struct hns_roce_dev *hr_dev, 696 struct hns_roce_hem_table *table, unsigned long obj, 697 struct hns_roce_hem_mhop *mhop, 698 struct hns_roce_hem_index *index) 699 { 700 struct ib_device *ibdev = &hr_dev->ib_dev; 701 u32 hop_num = mhop->hop_num; 702 u32 chunk_ba_num; 703 int step_idx; 704 705 index->inited = HEM_INDEX_BUF; 706 chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN; 707 if (check_whether_bt_num_2(table->type, hop_num)) { 708 if (hns_roce_check_hem_null(table->hem, index->buf, 709 chunk_ba_num, table->num_hem)) 710 index->inited |= HEM_INDEX_L0; 711 } else if (check_whether_bt_num_3(table->type, hop_num)) { 712 if (hns_roce_check_hem_null(table->hem, index->buf, 713 chunk_ba_num, table->num_hem)) { 714 index->inited |= HEM_INDEX_L1; 715 if (hns_roce_check_bt_null(table->bt_l1, index->l1, 716 chunk_ba_num)) 717 index->inited |= HEM_INDEX_L0; 718 } 719 } 720 721 if (table->type < HEM_TYPE_MTT) { 722 if (hop_num == HNS_ROCE_HOP_NUM_0) 723 step_idx = 0; 724 else 725 step_idx = hop_num; 726 727 if (hr_dev->hw->clear_hem(hr_dev, table, obj, step_idx)) 728 ibdev_warn(ibdev, "failed to clear hop%u HEM.\n", hop_num); 729 730 if (index->inited & HEM_INDEX_L1) 731 if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1)) 732 ibdev_warn(ibdev, "failed to clear HEM step 1.\n"); 733 734 if (index->inited & HEM_INDEX_L0) 735 if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0)) 736 ibdev_warn(ibdev, "failed to clear HEM step 0.\n"); 737 } 738 } 739 740 static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev, 741 struct hns_roce_hem_table *table, 742 unsigned long obj, 743 int check_refcount) 744 { 745 struct ib_device *ibdev = &hr_dev->ib_dev; 746 struct hns_roce_hem_index index = {}; 747 struct hns_roce_hem_mhop mhop = {}; 748 int ret; 749 750 ret = calc_hem_config(hr_dev, table, obj, &mhop, &index); 751 if (ret) { 752 ibdev_err(ibdev, "calc hem config failed!\n"); 753 return; 754 } 755 756 mutex_lock(&table->mutex); 757 if (check_refcount && (--table->hem[index.buf]->refcount > 0)) { 758 mutex_unlock(&table->mutex); 759 return; 760 } 761 762 clear_mhop_hem(hr_dev, table, obj, &mhop, &index); 763 free_mhop_hem(hr_dev, table, &mhop, &index); 764 765 mutex_unlock(&table->mutex); 766 } 767 768 void hns_roce_table_put(struct hns_roce_dev *hr_dev, 769 struct hns_roce_hem_table *table, unsigned long obj) 770 { 771 struct device *dev = hr_dev->dev; 772 unsigned long i; 773 774 if (hns_roce_check_whether_mhop(hr_dev, table->type)) { 775 hns_roce_table_mhop_put(hr_dev, table, obj, 1); 776 return; 777 } 778 779 i = (obj & (table->num_obj - 1)) / 780 (table->table_chunk_size / table->obj_size); 781 782 mutex_lock(&table->mutex); 783 784 if (--table->hem[i]->refcount == 0) { 785 /* Clear HEM base address */ 786 if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0)) 787 dev_warn(dev, "Clear HEM base address failed.\n"); 788 789 hns_roce_free_hem(hr_dev, table->hem[i]); 790 table->hem[i] = NULL; 791 } 792 793 mutex_unlock(&table->mutex); 794 } 795 796 void *hns_roce_table_find(struct hns_roce_dev *hr_dev, 797 struct hns_roce_hem_table *table, 798 unsigned long obj, dma_addr_t *dma_handle) 799 { 800 struct hns_roce_hem_chunk *chunk; 801 struct hns_roce_hem_mhop mhop; 802 struct hns_roce_hem *hem; 803 unsigned long mhop_obj = obj; 804 unsigned long obj_per_chunk; 805 unsigned long idx_offset; 806 int offset, dma_offset; 807 void *addr = NULL; 808 u32 hem_idx = 0; 809 int length; 810 int i, j; 811 812 if (!table->lowmem) 813 return NULL; 814 815 mutex_lock(&table->mutex); 816 817 if (!hns_roce_check_whether_mhop(hr_dev, table->type)) { 818 obj_per_chunk = table->table_chunk_size / table->obj_size; 819 hem = table->hem[(obj & (table->num_obj - 1)) / obj_per_chunk]; 820 idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk; 821 dma_offset = offset = idx_offset * table->obj_size; 822 } else { 823 u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */ 824 825 if (hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop)) 826 goto out; 827 /* mtt mhop */ 828 i = mhop.l0_idx; 829 j = mhop.l1_idx; 830 if (mhop.hop_num == 2) 831 hem_idx = i * (mhop.bt_chunk_size / BA_BYTE_LEN) + j; 832 else if (mhop.hop_num == 1 || 833 mhop.hop_num == HNS_ROCE_HOP_NUM_0) 834 hem_idx = i; 835 836 hem = table->hem[hem_idx]; 837 dma_offset = offset = (obj & (table->num_obj - 1)) * seg_size % 838 mhop.bt_chunk_size; 839 if (mhop.hop_num == 2) 840 dma_offset = offset = 0; 841 } 842 843 if (!hem) 844 goto out; 845 846 list_for_each_entry(chunk, &hem->chunk_list, list) { 847 for (i = 0; i < chunk->npages; ++i) { 848 length = sg_dma_len(&chunk->mem[i]); 849 if (dma_handle && dma_offset >= 0) { 850 if (length > (u32)dma_offset) 851 *dma_handle = sg_dma_address( 852 &chunk->mem[i]) + dma_offset; 853 dma_offset -= length; 854 } 855 856 if (length > (u32)offset) { 857 addr = chunk->buf[i] + offset; 858 goto out; 859 } 860 offset -= length; 861 } 862 } 863 864 out: 865 mutex_unlock(&table->mutex); 866 return addr; 867 } 868 869 int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev, 870 struct hns_roce_hem_table *table, u32 type, 871 unsigned long obj_size, unsigned long nobj, 872 int use_lowmem) 873 { 874 unsigned long obj_per_chunk; 875 unsigned long num_hem; 876 877 if (!hns_roce_check_whether_mhop(hr_dev, type)) { 878 table->table_chunk_size = hr_dev->caps.chunk_sz; 879 obj_per_chunk = table->table_chunk_size / obj_size; 880 num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk; 881 882 table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL); 883 if (!table->hem) 884 return -ENOMEM; 885 } else { 886 struct hns_roce_hem_mhop mhop = {}; 887 unsigned long buf_chunk_size; 888 unsigned long bt_chunk_size; 889 unsigned long bt_chunk_num; 890 unsigned long num_bt_l0; 891 u32 hop_num; 892 893 if (get_hem_table_config(hr_dev, &mhop, type)) 894 return -EINVAL; 895 896 buf_chunk_size = mhop.buf_chunk_size; 897 bt_chunk_size = mhop.bt_chunk_size; 898 num_bt_l0 = mhop.ba_l0_num; 899 hop_num = mhop.hop_num; 900 901 obj_per_chunk = buf_chunk_size / obj_size; 902 num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk; 903 bt_chunk_num = bt_chunk_size / BA_BYTE_LEN; 904 if (type >= HEM_TYPE_MTT) 905 num_bt_l0 = bt_chunk_num; 906 907 table->hem = kcalloc(num_hem, sizeof(*table->hem), 908 GFP_KERNEL); 909 if (!table->hem) 910 goto err_kcalloc_hem_buf; 911 912 if (check_whether_bt_num_3(type, hop_num)) { 913 unsigned long num_bt_l1; 914 915 num_bt_l1 = (num_hem + bt_chunk_num - 1) / 916 bt_chunk_num; 917 table->bt_l1 = kcalloc(num_bt_l1, 918 sizeof(*table->bt_l1), 919 GFP_KERNEL); 920 if (!table->bt_l1) 921 goto err_kcalloc_bt_l1; 922 923 table->bt_l1_dma_addr = kcalloc(num_bt_l1, 924 sizeof(*table->bt_l1_dma_addr), 925 GFP_KERNEL); 926 927 if (!table->bt_l1_dma_addr) 928 goto err_kcalloc_l1_dma; 929 } 930 931 if (check_whether_bt_num_2(type, hop_num) || 932 check_whether_bt_num_3(type, hop_num)) { 933 table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0), 934 GFP_KERNEL); 935 if (!table->bt_l0) 936 goto err_kcalloc_bt_l0; 937 938 table->bt_l0_dma_addr = kcalloc(num_bt_l0, 939 sizeof(*table->bt_l0_dma_addr), 940 GFP_KERNEL); 941 if (!table->bt_l0_dma_addr) 942 goto err_kcalloc_l0_dma; 943 } 944 } 945 946 table->type = type; 947 table->num_hem = num_hem; 948 table->num_obj = nobj; 949 table->obj_size = obj_size; 950 table->lowmem = use_lowmem; 951 mutex_init(&table->mutex); 952 953 return 0; 954 955 err_kcalloc_l0_dma: 956 kfree(table->bt_l0); 957 table->bt_l0 = NULL; 958 959 err_kcalloc_bt_l0: 960 kfree(table->bt_l1_dma_addr); 961 table->bt_l1_dma_addr = NULL; 962 963 err_kcalloc_l1_dma: 964 kfree(table->bt_l1); 965 table->bt_l1 = NULL; 966 967 err_kcalloc_bt_l1: 968 kfree(table->hem); 969 table->hem = NULL; 970 971 err_kcalloc_hem_buf: 972 return -ENOMEM; 973 } 974 975 static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev, 976 struct hns_roce_hem_table *table) 977 { 978 struct hns_roce_hem_mhop mhop; 979 u32 buf_chunk_size; 980 u64 obj; 981 int i; 982 983 if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop)) 984 return; 985 buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size : 986 mhop.bt_chunk_size; 987 988 for (i = 0; i < table->num_hem; ++i) { 989 obj = i * buf_chunk_size / table->obj_size; 990 if (table->hem[i]) 991 hns_roce_table_mhop_put(hr_dev, table, obj, 0); 992 } 993 994 kfree(table->hem); 995 table->hem = NULL; 996 kfree(table->bt_l1); 997 table->bt_l1 = NULL; 998 kfree(table->bt_l1_dma_addr); 999 table->bt_l1_dma_addr = NULL; 1000 kfree(table->bt_l0); 1001 table->bt_l0 = NULL; 1002 kfree(table->bt_l0_dma_addr); 1003 table->bt_l0_dma_addr = NULL; 1004 } 1005 1006 void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev, 1007 struct hns_roce_hem_table *table) 1008 { 1009 struct device *dev = hr_dev->dev; 1010 unsigned long i; 1011 1012 if (hns_roce_check_whether_mhop(hr_dev, table->type)) { 1013 hns_roce_cleanup_mhop_hem_table(hr_dev, table); 1014 return; 1015 } 1016 1017 for (i = 0; i < table->num_hem; ++i) 1018 if (table->hem[i]) { 1019 if (hr_dev->hw->clear_hem(hr_dev, table, 1020 i * table->table_chunk_size / table->obj_size, 0)) 1021 dev_err(dev, "Clear HEM base address failed.\n"); 1022 1023 hns_roce_free_hem(hr_dev, table->hem[i]); 1024 } 1025 1026 kfree(table->hem); 1027 } 1028 1029 void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev) 1030 { 1031 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) 1032 hns_roce_cleanup_hem_table(hr_dev, 1033 &hr_dev->srq_table.table); 1034 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table); 1035 if (hr_dev->caps.qpc_timer_entry_sz) 1036 hns_roce_cleanup_hem_table(hr_dev, 1037 &hr_dev->qpc_timer_table); 1038 if (hr_dev->caps.cqc_timer_entry_sz) 1039 hns_roce_cleanup_hem_table(hr_dev, 1040 &hr_dev->cqc_timer_table); 1041 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) 1042 hns_roce_cleanup_hem_table(hr_dev, 1043 &hr_dev->qp_table.sccc_table); 1044 if (hr_dev->caps.trrl_entry_sz) 1045 hns_roce_cleanup_hem_table(hr_dev, 1046 &hr_dev->qp_table.trrl_table); 1047 1048 if (hr_dev->caps.gmv_entry_sz) 1049 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->gmv_table); 1050 1051 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table); 1052 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table); 1053 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table); 1054 } 1055 1056 struct roce_hem_item { 1057 struct list_head list; /* link all hems in the same bt level */ 1058 struct list_head sibling; /* link all hems in last hop for mtt */ 1059 void *addr; 1060 dma_addr_t dma_addr; 1061 size_t count; /* max ba numbers */ 1062 int start; /* start buf offset in this hem */ 1063 int end; /* end buf offset in this hem */ 1064 }; 1065 1066 static struct roce_hem_item *hem_list_alloc_item(struct hns_roce_dev *hr_dev, 1067 int start, int end, 1068 int count, bool exist_bt, 1069 int bt_level) 1070 { 1071 struct roce_hem_item *hem; 1072 1073 hem = kzalloc(sizeof(*hem), GFP_KERNEL); 1074 if (!hem) 1075 return NULL; 1076 1077 if (exist_bt) { 1078 hem->addr = dma_alloc_coherent(hr_dev->dev, count * BA_BYTE_LEN, 1079 &hem->dma_addr, GFP_KERNEL); 1080 if (!hem->addr) { 1081 kfree(hem); 1082 return NULL; 1083 } 1084 } 1085 1086 hem->count = count; 1087 hem->start = start; 1088 hem->end = end; 1089 INIT_LIST_HEAD(&hem->list); 1090 INIT_LIST_HEAD(&hem->sibling); 1091 1092 return hem; 1093 } 1094 1095 static void hem_list_free_item(struct hns_roce_dev *hr_dev, 1096 struct roce_hem_item *hem, bool exist_bt) 1097 { 1098 if (exist_bt) 1099 dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN, 1100 hem->addr, hem->dma_addr); 1101 kfree(hem); 1102 } 1103 1104 static void hem_list_free_all(struct hns_roce_dev *hr_dev, 1105 struct list_head *head, bool exist_bt) 1106 { 1107 struct roce_hem_item *hem, *temp_hem; 1108 1109 list_for_each_entry_safe(hem, temp_hem, head, list) { 1110 list_del(&hem->list); 1111 hem_list_free_item(hr_dev, hem, exist_bt); 1112 } 1113 } 1114 1115 static void hem_list_link_bt(struct hns_roce_dev *hr_dev, void *base_addr, 1116 u64 table_addr) 1117 { 1118 *(u64 *)(base_addr) = table_addr; 1119 } 1120 1121 /* assign L0 table address to hem from root bt */ 1122 static void hem_list_assign_bt(struct hns_roce_dev *hr_dev, 1123 struct roce_hem_item *hem, void *cpu_addr, 1124 u64 phy_addr) 1125 { 1126 hem->addr = cpu_addr; 1127 hem->dma_addr = (dma_addr_t)phy_addr; 1128 } 1129 1130 static inline bool hem_list_page_is_in_range(struct roce_hem_item *hem, 1131 int offset) 1132 { 1133 return (hem->start <= offset && offset <= hem->end); 1134 } 1135 1136 static struct roce_hem_item *hem_list_search_item(struct list_head *ba_list, 1137 int page_offset) 1138 { 1139 struct roce_hem_item *hem, *temp_hem; 1140 struct roce_hem_item *found = NULL; 1141 1142 list_for_each_entry_safe(hem, temp_hem, ba_list, list) { 1143 if (hem_list_page_is_in_range(hem, page_offset)) { 1144 found = hem; 1145 break; 1146 } 1147 } 1148 1149 return found; 1150 } 1151 1152 static bool hem_list_is_bottom_bt(int hopnum, int bt_level) 1153 { 1154 /* 1155 * hopnum base address table levels 1156 * 0 L0(buf) 1157 * 1 L0 -> buf 1158 * 2 L0 -> L1 -> buf 1159 * 3 L0 -> L1 -> L2 -> buf 1160 */ 1161 return bt_level >= (hopnum ? hopnum - 1 : hopnum); 1162 } 1163 1164 /** 1165 * calc base address entries num 1166 * @hopnum: num of mutihop addressing 1167 * @bt_level: base address table level 1168 * @unit: ba entries per bt page 1169 */ 1170 static u32 hem_list_calc_ba_range(int hopnum, int bt_level, int unit) 1171 { 1172 u32 step; 1173 int max; 1174 int i; 1175 1176 if (hopnum <= bt_level) 1177 return 0; 1178 /* 1179 * hopnum bt_level range 1180 * 1 0 unit 1181 * ------------ 1182 * 2 0 unit * unit 1183 * 2 1 unit 1184 * ------------ 1185 * 3 0 unit * unit * unit 1186 * 3 1 unit * unit 1187 * 3 2 unit 1188 */ 1189 step = 1; 1190 max = hopnum - bt_level; 1191 for (i = 0; i < max; i++) 1192 step = step * unit; 1193 1194 return step; 1195 } 1196 1197 /** 1198 * calc the root ba entries which could cover all regions 1199 * @regions: buf region array 1200 * @region_cnt: array size of @regions 1201 * @unit: ba entries per bt page 1202 */ 1203 int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions, 1204 int region_cnt, int unit) 1205 { 1206 struct hns_roce_buf_region *r; 1207 int total = 0; 1208 int step; 1209 int i; 1210 1211 for (i = 0; i < region_cnt; i++) { 1212 r = (struct hns_roce_buf_region *)®ions[i]; 1213 if (r->hopnum > 1) { 1214 step = hem_list_calc_ba_range(r->hopnum, 1, unit); 1215 if (step > 0) 1216 total += (r->count + step - 1) / step; 1217 } else { 1218 total += r->count; 1219 } 1220 } 1221 1222 return total; 1223 } 1224 1225 static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev, 1226 const struct hns_roce_buf_region *r, int unit, 1227 int offset, struct list_head *mid_bt, 1228 struct list_head *btm_bt) 1229 { 1230 struct roce_hem_item *hem_ptrs[HNS_ROCE_MAX_BT_LEVEL] = { NULL }; 1231 struct list_head temp_list[HNS_ROCE_MAX_BT_LEVEL]; 1232 struct roce_hem_item *cur, *pre; 1233 const int hopnum = r->hopnum; 1234 int start_aligned; 1235 int distance; 1236 int ret = 0; 1237 int max_ofs; 1238 int level; 1239 u32 step; 1240 int end; 1241 1242 if (hopnum <= 1) 1243 return 0; 1244 1245 if (hopnum > HNS_ROCE_MAX_BT_LEVEL) { 1246 dev_err(hr_dev->dev, "invalid hopnum %d!\n", hopnum); 1247 return -EINVAL; 1248 } 1249 1250 if (offset < r->offset) { 1251 dev_err(hr_dev->dev, "invalid offset %d, min %u!\n", 1252 offset, r->offset); 1253 return -EINVAL; 1254 } 1255 1256 distance = offset - r->offset; 1257 max_ofs = r->offset + r->count - 1; 1258 for (level = 0; level < hopnum; level++) 1259 INIT_LIST_HEAD(&temp_list[level]); 1260 1261 /* config L1 bt to last bt and link them to corresponding parent */ 1262 for (level = 1; level < hopnum; level++) { 1263 cur = hem_list_search_item(&mid_bt[level], offset); 1264 if (cur) { 1265 hem_ptrs[level] = cur; 1266 continue; 1267 } 1268 1269 step = hem_list_calc_ba_range(hopnum, level, unit); 1270 if (step < 1) { 1271 ret = -EINVAL; 1272 goto err_exit; 1273 } 1274 1275 start_aligned = (distance / step) * step + r->offset; 1276 end = min_t(int, start_aligned + step - 1, max_ofs); 1277 cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit, 1278 true, level); 1279 if (!cur) { 1280 ret = -ENOMEM; 1281 goto err_exit; 1282 } 1283 hem_ptrs[level] = cur; 1284 list_add(&cur->list, &temp_list[level]); 1285 if (hem_list_is_bottom_bt(hopnum, level)) 1286 list_add(&cur->sibling, &temp_list[0]); 1287 1288 /* link bt to parent bt */ 1289 if (level > 1) { 1290 pre = hem_ptrs[level - 1]; 1291 step = (cur->start - pre->start) / step * BA_BYTE_LEN; 1292 hem_list_link_bt(hr_dev, pre->addr + step, 1293 cur->dma_addr); 1294 } 1295 } 1296 1297 list_splice(&temp_list[0], btm_bt); 1298 for (level = 1; level < hopnum; level++) 1299 list_splice(&temp_list[level], &mid_bt[level]); 1300 1301 return 0; 1302 1303 err_exit: 1304 for (level = 1; level < hopnum; level++) 1305 hem_list_free_all(hr_dev, &temp_list[level], true); 1306 1307 return ret; 1308 } 1309 1310 static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev, 1311 struct hns_roce_hem_list *hem_list, int unit, 1312 const struct hns_roce_buf_region *regions, 1313 int region_cnt) 1314 { 1315 struct list_head temp_list[HNS_ROCE_MAX_BT_REGION]; 1316 struct roce_hem_item *hem, *temp_hem, *root_hem; 1317 const struct hns_roce_buf_region *r; 1318 struct list_head temp_root; 1319 struct list_head temp_btm; 1320 void *cpu_base; 1321 u64 phy_base; 1322 int ret = 0; 1323 int ba_num; 1324 int offset; 1325 int total; 1326 int step; 1327 int i; 1328 1329 r = ®ions[0]; 1330 root_hem = hem_list_search_item(&hem_list->root_bt, r->offset); 1331 if (root_hem) 1332 return 0; 1333 1334 ba_num = hns_roce_hem_list_calc_root_ba(regions, region_cnt, unit); 1335 if (ba_num < 1) 1336 return -ENOMEM; 1337 1338 if (ba_num > unit) 1339 return -ENOBUFS; 1340 1341 ba_num = min_t(int, ba_num, unit); 1342 INIT_LIST_HEAD(&temp_root); 1343 offset = r->offset; 1344 /* indicate to last region */ 1345 r = ®ions[region_cnt - 1]; 1346 root_hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1, 1347 ba_num, true, 0); 1348 if (!root_hem) 1349 return -ENOMEM; 1350 list_add(&root_hem->list, &temp_root); 1351 1352 hem_list->root_ba = root_hem->dma_addr; 1353 1354 INIT_LIST_HEAD(&temp_btm); 1355 for (i = 0; i < region_cnt; i++) 1356 INIT_LIST_HEAD(&temp_list[i]); 1357 1358 total = 0; 1359 for (i = 0; i < region_cnt && total < ba_num; i++) { 1360 r = ®ions[i]; 1361 if (!r->count) 1362 continue; 1363 1364 /* all regions's mid[x][0] shared the root_bt's trunk */ 1365 cpu_base = root_hem->addr + total * BA_BYTE_LEN; 1366 phy_base = root_hem->dma_addr + total * BA_BYTE_LEN; 1367 1368 /* if hopnum is 0 or 1, cut a new fake hem from the root bt 1369 * which's address share to all regions. 1370 */ 1371 if (hem_list_is_bottom_bt(r->hopnum, 0)) { 1372 hem = hem_list_alloc_item(hr_dev, r->offset, 1373 r->offset + r->count - 1, 1374 r->count, false, 0); 1375 if (!hem) { 1376 ret = -ENOMEM; 1377 goto err_exit; 1378 } 1379 hem_list_assign_bt(hr_dev, hem, cpu_base, phy_base); 1380 list_add(&hem->list, &temp_list[i]); 1381 list_add(&hem->sibling, &temp_btm); 1382 total += r->count; 1383 } else { 1384 step = hem_list_calc_ba_range(r->hopnum, 1, unit); 1385 if (step < 1) { 1386 ret = -EINVAL; 1387 goto err_exit; 1388 } 1389 /* if exist mid bt, link L1 to L0 */ 1390 list_for_each_entry_safe(hem, temp_hem, 1391 &hem_list->mid_bt[i][1], list) { 1392 offset = (hem->start - r->offset) / step * 1393 BA_BYTE_LEN; 1394 hem_list_link_bt(hr_dev, cpu_base + offset, 1395 hem->dma_addr); 1396 total++; 1397 } 1398 } 1399 } 1400 1401 list_splice(&temp_btm, &hem_list->btm_bt); 1402 list_splice(&temp_root, &hem_list->root_bt); 1403 for (i = 0; i < region_cnt; i++) 1404 list_splice(&temp_list[i], &hem_list->mid_bt[i][0]); 1405 1406 return 0; 1407 1408 err_exit: 1409 for (i = 0; i < region_cnt; i++) 1410 hem_list_free_all(hr_dev, &temp_list[i], false); 1411 1412 hem_list_free_all(hr_dev, &temp_root, true); 1413 1414 return ret; 1415 } 1416 1417 /* construct the base address table and link them by address hop config */ 1418 int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev, 1419 struct hns_roce_hem_list *hem_list, 1420 const struct hns_roce_buf_region *regions, 1421 int region_cnt, unsigned int bt_pg_shift) 1422 { 1423 const struct hns_roce_buf_region *r; 1424 int ofs, end; 1425 int unit; 1426 int ret; 1427 int i; 1428 1429 if (region_cnt > HNS_ROCE_MAX_BT_REGION) { 1430 dev_err(hr_dev->dev, "invalid region region_cnt %d!\n", 1431 region_cnt); 1432 return -EINVAL; 1433 } 1434 1435 unit = (1 << bt_pg_shift) / BA_BYTE_LEN; 1436 for (i = 0; i < region_cnt; i++) { 1437 r = ®ions[i]; 1438 if (!r->count) 1439 continue; 1440 1441 end = r->offset + r->count; 1442 for (ofs = r->offset; ofs < end; ofs += unit) { 1443 ret = hem_list_alloc_mid_bt(hr_dev, r, unit, ofs, 1444 hem_list->mid_bt[i], 1445 &hem_list->btm_bt); 1446 if (ret) { 1447 dev_err(hr_dev->dev, 1448 "alloc hem trunk fail ret=%d!\n", ret); 1449 goto err_alloc; 1450 } 1451 } 1452 } 1453 1454 ret = hem_list_alloc_root_bt(hr_dev, hem_list, unit, regions, 1455 region_cnt); 1456 if (ret) 1457 dev_err(hr_dev->dev, "alloc hem root fail ret=%d!\n", ret); 1458 else 1459 return 0; 1460 1461 err_alloc: 1462 hns_roce_hem_list_release(hr_dev, hem_list); 1463 1464 return ret; 1465 } 1466 1467 void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev, 1468 struct hns_roce_hem_list *hem_list) 1469 { 1470 int i, j; 1471 1472 for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++) 1473 for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++) 1474 hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j], 1475 j != 0); 1476 1477 hem_list_free_all(hr_dev, &hem_list->root_bt, true); 1478 INIT_LIST_HEAD(&hem_list->btm_bt); 1479 hem_list->root_ba = 0; 1480 } 1481 1482 void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list) 1483 { 1484 int i, j; 1485 1486 INIT_LIST_HEAD(&hem_list->root_bt); 1487 INIT_LIST_HEAD(&hem_list->btm_bt); 1488 for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++) 1489 for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++) 1490 INIT_LIST_HEAD(&hem_list->mid_bt[i][j]); 1491 } 1492 1493 void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev, 1494 struct hns_roce_hem_list *hem_list, 1495 int offset, int *mtt_cnt, u64 *phy_addr) 1496 { 1497 struct list_head *head = &hem_list->btm_bt; 1498 struct roce_hem_item *hem, *temp_hem; 1499 void *cpu_base = NULL; 1500 u64 phy_base = 0; 1501 int nr = 0; 1502 1503 list_for_each_entry_safe(hem, temp_hem, head, sibling) { 1504 if (hem_list_page_is_in_range(hem, offset)) { 1505 nr = offset - hem->start; 1506 cpu_base = hem->addr + nr * BA_BYTE_LEN; 1507 phy_base = hem->dma_addr + nr * BA_BYTE_LEN; 1508 nr = hem->end + 1 - offset; 1509 break; 1510 } 1511 } 1512 1513 if (mtt_cnt) 1514 *mtt_cnt = nr; 1515 1516 if (phy_addr) 1517 *phy_addr = phy_base; 1518 1519 return cpu_base; 1520 } 1521