/*
 * Microsemi Switchtec(tm) PCIe Management Driver
 * Copyright (c) 2017, Microsemi Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/ntb.h>
#include <linux/pci.h>
#include <linux/switchtec.h>

MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");

/* Upper bound applied to the size of any direct window reported to clients */
static ulong max_mw_size = SZ_2M;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size,
	"Max memory window size reported to the upper layer");

/* When false (the default) only direct BAR-based windows are exposed */
static bool use_lut_mws;
module_param(use_lut_mws, bool, 0644);
MODULE_PARM_DESC(use_lut_mws,
	"Enable the use of the LUT based memory windows");

/* Magic written to the shared window so the peer can recognize it */
#define SWITCHTEC_NTB_MAGIC 0x45CC0001
#define MAX_MWS 128

/*
 * Layout of the reserved LUT window shared with the peer. The peer
 * reads our magic/link_sta through its mapping of this structure
 * (see switchtec_ntb_check_link) and the spad array implements the
 * scratchpad registers.
 */
struct shared_mw {
	u32 magic;
	u32 link_sta;
	u32 partition_id;
	u64 mw_sizes[MAX_MWS];
	u32 spad[128];
};

#define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry)
#define LUT_SIZE SZ_64K

/* Per-device state; embeds the generic ntb_dev at offset 0 for container_of */
struct switchtec_ntb {
	struct ntb_dev ntb;
	struct switchtec_dev *stdev;

	int self_partition;		/* our NT partition number */
	int peer_partition;		/* the peer's NT partition number */

	int doorbell_irq;
	int message_irq;

	/* MMIO views into the switch's NTB register file */
	struct ntb_info_regs __iomem *mmio_ntb;
	struct ntb_ctrl_regs __iomem *mmio_ctrl;
	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
	struct ntb_ctrl_regs __iomem *mmio_self_ctrl;
	struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
	struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg;
	struct ntb_dbmsg_regs __iomem *mmio_peer_dbmsg;

	/* mapping of the crosslink LUT window (crosslink topology only) */
	void __iomem *mmio_xlink_win;

	struct shared_mw *self_shared;		  /* our DMA-coherent shared page */
	struct shared_mw __iomem *peer_shared;	  /* peer's shared page via LUT 0 */
	dma_addr_t self_shared_dma;

	u64 db_mask;		/* software copy of the doorbell mask */
	u64 db_valid_mask;
	int db_shift;		/* bit offset of our half of the doorbell reg */
	int db_peer_shift;	/* bit offset of the peer's half */

	/* synchronize rmw access of db_mask and hw reg */
	spinlock_t db_mask_lock;

	int nr_direct_mw;
	int nr_lut_mw;
	int nr_rsvd_luts;	/* LUTs reserved for internal use (shared mw, xlink) */
	int direct_mw_to_bar[MAX_DIRECT_MW];

	int peer_nr_direct_mw;
	int peer_nr_lut_mw;
	int peer_direct_mw_to_bar[MAX_DIRECT_MW];

	bool link_is_up;
	enum ntb_speed link_speed;
	enum ntb_width link_width;
	struct work_struct link_reinit_work;
};

static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
{
	return container_of(ntb, struct switchtec_ntb, ntb);
}

/*
 * Issue a partition operation (lock/configure/reset) on the given NT
 * control register block and poll until the hardware leaves the
 * corresponding "in progress" status.
 *
 * Returns 0 when the final status equals wait_status, -EINTR if the
 * poll sleep was interrupted (the op is reset in that case),
 * -ETIMEDOUT if still in progress after 1000 * 50ms polls, or -EIO
 * for any other final status.
 */
static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
				 struct ntb_ctrl_regs __iomem *ctl,
				 u32 op, int wait_status)
{
	static const char * const op_text[] = {
		[NTB_CTRL_PART_OP_LOCK] = "lock",
		[NTB_CTRL_PART_OP_CFG] = "configure",
		[NTB_CTRL_PART_OP_RESET] = "reset",
	};

	int i;
	u32 ps;
	int status;

	switch (op) {
	case NTB_CTRL_PART_OP_LOCK:
		status = NTB_CTRL_PART_STATUS_LOCKING;
		break;
	case NTB_CTRL_PART_OP_CFG:
		status = NTB_CTRL_PART_STATUS_CONFIGURING;
		break;
	case NTB_CTRL_PART_OP_RESET:
		status = NTB_CTRL_PART_STATUS_RESETTING;
		break;
	default:
		return -EINVAL;
	}

	iowrite32(op, &ctl->partition_op);

	for (i = 0; i < 1000; i++) {
		if (msleep_interruptible(50) != 0) {
			/* interrupted: abort the pending operation */
			iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op);
			return -EINTR;
		}

		/* status lives in the low 16 bits of the register */
		ps = ioread32(&ctl->partition_status) & 0xFFFF;

		if (ps != status)
			break;
	}

	if (ps == wait_status)
		return 0;

	if (ps == status) {
		dev_err(&sndev->stdev->dev,
			"Timed out while performing %s (%d). (%08x)\n",
			op_text[op], op,
			ioread32(&ctl->partition_status));

		return -ETIMEDOUT;
	}

	return -EIO;
}

/* Write val into outbound message register idx (rings the peer's imsg) */
static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx,
				  u32 val)
{
	if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_peer_dbmsg->omsg))
		return -EINVAL;

	iowrite32(val, &sndev->mmio_peer_dbmsg->omsg[idx].msg);

	return 0;
}

/*
 * ntb_dev_ops.mw_count: number of inbound windows the peer can use,
 * i.e. the peer's direct windows plus (optionally) its LUT windows
 * minus the LUTs we reserve for internal use.
 */
static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	int nr_direct_mw = sndev->peer_nr_direct_mw;
	int nr_lut_mw = sndev->peer_nr_lut_mw - sndev->nr_rsvd_luts;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	if (!use_lut_mws)
		nr_lut_mw = 0;

	return nr_direct_mw + nr_lut_mw;
}

/*
 * Map a client window index (direct windows first, then LUTs) to a
 * LUT entry index, skipping the reserved LUTs.
 */
static int lut_index(struct switchtec_ntb *sndev, int mw_idx)
{
	return mw_idx - sndev->nr_direct_mw + sndev->nr_rsvd_luts;
}

/* Same mapping as lut_index() but for the peer's window numbering */
static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx)
{
	return mw_idx - sndev->peer_nr_direct_mw + sndev->nr_rsvd_luts;
}

/*
 * ntb_dev_ops.mw_get_align: LUT windows must be exactly their fixed
 * size and aligned to it; direct windows need 4K alignment. The
 * maximum size is whatever the peer advertised in its shared page.
 */
static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx,
				      int widx, resource_size_t *addr_align,
				      resource_size_t *size_align,
				      resource_size_t *size_max)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	int lut;
	resource_size_t size;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	lut = widx >= sndev->peer_nr_direct_mw;
	size = ioread64(&sndev->peer_shared->mw_sizes[widx]);

	if (size == 0)
		return -EINVAL;

	if (addr_align)
		*addr_align = lut ? size : SZ_4K;

	if (size_align)
		*size_align = lut ? size : SZ_4K;

	if (size_max)
		*size_max = size;

	return 0;
}

/* Disable the direct window for client index idx in the peer's ctrl regs */
static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	int bar = sndev->peer_direct_mw_to_bar[idx];
	u32 ctl_val;

	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
	ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
	iowrite32(0, &ctl->bar_entry[bar].win_size);
	/* xlate_addr still carries the target partition in its low bits */
	iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
}

/* Disable the LUT entry backing client window idx */
static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;

	iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]);
}

/*
 * Program a direct window: the win_size register encodes log2(size)
 * in the low bits OR'd with the size itself, and xlate_addr carries
 * the destination partition in the (4K-aligned) address's low bits.
 */
static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
					dma_addr_t addr, resource_size_t size)
{
	int xlate_pos = ilog2(size);
	int bar = sndev->peer_direct_mw_to_bar[idx];
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	u32 ctl_val;

	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
	ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;

	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
	iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
	iowrite64(sndev->self_partition | addr,
		  &ctl->bar_entry[bar].xlate_addr);
}

/* Program a LUT entry: enable bit, partition in bits [?:1], address above */
static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx,
				     dma_addr_t addr, resource_size_t size)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;

	iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr),
		  &ctl->lut_entry[peer_lut_index(sndev, idx)]);
}

/*
 * ntb_dev_ops.mw_set_trans: set (or, for addr/size of 0, clear) the
 * translation of inbound window widx under a hardware lock/configure
 * sequence. On a hardware config error the window is rolled back and
 * the CFG op retried so the partition returns to NORMAL.
 */
static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
				      dma_addr_t addr, resource_size_t size)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	int xlate_pos = ilog2(size);
	int nr_direct_mw = sndev->peer_nr_direct_mw;
	int rc;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap\n",
		widx, pidx, &addr, &size);

	if (widx >= switchtec_ntb_mw_count(ntb, pidx))
		return -EINVAL;

	/* hardware requires at least 4K windows */
	if (xlate_pos < 12)
		return -EINVAL;

	if (!IS_ALIGNED(addr, BIT_ULL(xlate_pos))) {
		/*
		 * In certain circumstances we can get a buffer that is
		 * not aligned to its size. (Most of the time
		 * dma_alloc_coherent ensures this). This can happen when
		 * using large buffers allocated by the CMA
		 * (see CMA_CONFIG_ALIGNMENT)
		 */
		dev_err(&sndev->stdev->dev,
			"ERROR: Memory window address is not aligned to it's size!\n");
		return -EINVAL;
	}

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	if (addr == 0 || size == 0) {
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_clr_direct(sndev, widx);
		else
			switchtec_ntb_mw_clr_lut(sndev, widx);
	} else {
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_set_direct(sndev, widx, addr, size);
		else
			switchtec_ntb_mw_set_lut(sndev, widx, addr, size);
	}

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);

	if (rc == -EIO) {
		dev_err(&sndev->stdev->dev,
			"Hardware reported an error configuring mw %d: %08x\n",
			widx, ioread32(&ctl->bar_error));

		/* roll the bad window back and re-run CFG to recover */
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_clr_direct(sndev, widx);
		else
			switchtec_ntb_mw_clr_lut(sndev, widx);

		switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				      NTB_CTRL_PART_STATUS_NORMAL);
	}

	return rc;
}

/*
 * ntb_dev_ops.peer_mw_count: number of outbound windows we can map,
 * mirroring switchtec_ntb_mw_count() but for our own resources.
 */
static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	int nr_lut_mw = sndev->nr_lut_mw - sndev->nr_rsvd_luts;

	return
sndev->nr_direct_mw + (use_lut_mws ? nr_lut_mw : 0);
}

/*
 * Compute the physical address and usable size of outbound direct
 * window idx. Window 0 shares its BAR with the LUT entries, so its
 * usable region begins after all LUTs.
 */
static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev,
					 int idx, phys_addr_t *base,
					 resource_size_t *size)
{
	int bar = sndev->direct_mw_to_bar[idx];
	size_t offset = 0;

	if (bar < 0)
		return -EINVAL;

	if (idx == 0) {
		/*
		 * This is the direct BAR shared with the LUTs
		 * which means the actual window will be offset
		 * by the size of all the LUT entries.
		 */

		offset = LUT_SIZE * sndev->nr_lut_mw;
	}

	if (base)
		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;

	if (size) {
		*size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
		/*
		 * NOTE(review): capping the window to 'offset' when an
		 * offset is in use looks intentional (keeps the window
		 * within the region carved out next to the LUTs) —
		 * confirm against the hardware layout.
		 */
		if (offset && *size > offset)
			*size = offset;

		if (*size > max_mw_size)
			*size = max_mw_size;
	}

	return 0;
}

/*
 * Compute the physical address of outbound LUT window idx: LUTs are
 * fixed-size slots at the start of the BAR shared with direct window 0.
 */
static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev,
				      int idx, phys_addr_t *base,
				      resource_size_t *size)
{
	int bar = sndev->direct_mw_to_bar[0];
	int offset;

	offset = LUT_SIZE * lut_index(sndev, idx);

	if (base)
		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;

	if (size)
		*size = LUT_SIZE;

	return 0;
}

/* ntb_dev_ops.peer_mw_get_addr: dispatch to direct or LUT helper by index */
static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
					  phys_addr_t *base,
					  resource_size_t *size)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (idx < sndev->nr_direct_mw)
		return switchtec_ntb_direct_get_addr(sndev, idx, base, size);
	else if (idx < switchtec_ntb_peer_mw_count(ntb))
		return switchtec_ntb_lut_get_addr(sndev, idx, base, size);
	else
		return -EINVAL;
}

/*
 * Read the negotiated PCIe link speed/width of the given partition's
 * virtual EP function from its config-space capability region.
 */
static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
					  int partition,
					  enum ntb_speed *speed,
					  enum ntb_width *width)
{
	struct switchtec_dev *stdev = sndev->stdev;

	u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id);
	/*
	 * NOTE(review): dword 13 of pci_cap_region presumably holds the
	 * PCIe Link Control/Status pair; the shifts below extract Link
	 * Status speed/width fields — confirm against the Switchtec
	 * register map.
	 */
	u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);

	if (speed)
		*speed = (linksta >> 16) & 0xF;

	if (width)
		*width = (linksta >> 20) & 0x3F;
}

/*
 * Update cached link speed/width: the effective values are the
 * minimum negotiated by either side; NONE when the NTB link is down.
 */
static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev)
{
	enum ntb_speed self_speed, peer_speed;
	enum ntb_width self_width, peer_width;

	if (!sndev->link_is_up) {
		sndev->link_speed = NTB_SPEED_NONE;
		sndev->link_width = NTB_WIDTH_NONE;
		return;
	}

	switchtec_ntb_part_link_speed(sndev, sndev->self_partition,
				      &self_speed, &self_width);
	switchtec_ntb_part_link_speed(sndev, sndev->peer_partition,
				      &peer_speed, &peer_width);

	sndev->link_speed = min(self_speed, peer_speed);
	sndev->link_width = min(self_width, peer_width);
}

/* Non-zero if the peer partition is configured for crosslink topology */
static int crosslink_is_enabled(struct switchtec_ntb *sndev)
{
	struct ntb_info_regs __iomem *inf = sndev->mmio_ntb;

	return ioread8(&inf->ntp_info[sndev->peer_partition].xlink_enabled);
}

/*
 * (Re)program the peer-side message map and outbound doorbell mask
 * for crosslink mode. No-op outside crosslink topology.
 */
static void crosslink_init_dbmsgs(struct switchtec_ntb *sndev)
{
	int i;
	u32 msg_map = 0;

	if (!crosslink_is_enabled(sndev))
		return;

	for (i = 0; i < ARRAY_SIZE(sndev->mmio_peer_dbmsg->imsg); i++) {
		/* route imsg i to our partition, one byte per message */
		int m = i | sndev->self_partition << 2;

		msg_map |= m << i * 8;
	}

	iowrite32(msg_map, &sndev->mmio_peer_dbmsg->msg_map);
	iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
		  &sndev->mmio_peer_dbmsg->odb_mask);
}

/* Values exchanged over message register LINK_MESSAGE */
enum switchtec_msg {
	LINK_MESSAGE = 0,
	MSG_LINK_UP = 1,
	MSG_LINK_DOWN = 2,
	MSG_CHECK_LINK = 3,
	MSG_LINK_FORCE_DOWN = 4,
};

static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev);

/* Deferred re-initialization after a forced link-down event */
static void link_reinit_work(struct work_struct *work)
{
	struct switchtec_ntb *sndev;

	sndev = container_of(work, struct switchtec_ntb, link_reinit_work);

	switchtec_ntb_reinit_peer(sndev);
}

/*
 * Re-evaluate link state. The link is considered up only when we have
 * enabled it locally (self_shared->link_sta) AND the peer's shared
 * page shows the magic plus a non-zero link_sta in its upper word.
 * On any transition, notify clients and poke the peer to re-check.
 */
static void switchtec_ntb_check_link(struct switchtec_ntb *sndev,
				     enum switchtec_msg msg)
{
	int link_sta;
	int old = sndev->link_is_up;

	if (msg == MSG_LINK_FORCE_DOWN) {
		schedule_work(&sndev->link_reinit_work);

		if (sndev->link_is_up) {
			sndev->link_is_up = 0;
			ntb_link_event(&sndev->ntb);
			dev_info(&sndev->stdev->dev, "ntb link forced down\n");
		}

		return;
	}

	link_sta = sndev->self_shared->link_sta;
	if (link_sta) {
		u64 peer = ioread64(&sndev->peer_shared->magic);

		if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC)
			link_sta = peer >> 32;
		else
			link_sta = 0;
	}

	sndev->link_is_up = link_sta;
	switchtec_ntb_set_link_speed(sndev);

	if (link_sta != old) {
		switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK);
		ntb_link_event(&sndev->ntb);
		dev_info(&sndev->stdev->dev, "ntb link %s\n",
			 link_sta ? "up" : "down");

		if (link_sta)
			crosslink_init_dbmsgs(sndev);
	}
}

/* Callback from the switchtec core when the switch reports a link event */
static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
{
	struct switchtec_ntb *sndev = stdev->sndev;

	switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);
}

/* ntb_dev_ops.link_is_up: report cached link state, speed and width */
static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb,
				    enum ntb_speed *speed,
				    enum ntb_width *width)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (speed)
		*speed = sndev->link_speed;
	if (width)
		*width = sndev->link_width;

	return sndev->link_is_up;
}

/*
 * ntb_dev_ops.link_enable: advertise link-up in our shared page, tell
 * the peer, then re-evaluate. max_speed/max_width are not enforced by
 * this hardware.
 */
static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
				     enum ntb_speed max_speed,
				     enum ntb_width max_width)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	dev_dbg(&sndev->stdev->dev, "enabling link\n");

	sndev->self_shared->link_sta = 1;
	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);

	switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);

	return 0;
}

/* ntb_dev_ops.link_disable: inverse of link_enable */
static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
{
	struct
switchtec_ntb *sndev = ntb_sndev(ntb); 585 586 dev_dbg(&sndev->stdev->dev, "disabling link\n"); 587 588 sndev->self_shared->link_sta = 0; 589 switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_DOWN); 590 591 switchtec_ntb_check_link(sndev, MSG_CHECK_LINK); 592 593 return 0; 594 } 595 596 static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb) 597 { 598 struct switchtec_ntb *sndev = ntb_sndev(ntb); 599 600 return sndev->db_valid_mask; 601 } 602 603 static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb) 604 { 605 return 1; 606 } 607 608 static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector) 609 { 610 struct switchtec_ntb *sndev = ntb_sndev(ntb); 611 612 if (db_vector < 0 || db_vector > 1) 613 return 0; 614 615 return sndev->db_valid_mask; 616 } 617 618 static u64 switchtec_ntb_db_read(struct ntb_dev *ntb) 619 { 620 u64 ret; 621 struct switchtec_ntb *sndev = ntb_sndev(ntb); 622 623 ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift; 624 625 return ret & sndev->db_valid_mask; 626 } 627 628 static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits) 629 { 630 struct switchtec_ntb *sndev = ntb_sndev(ntb); 631 632 iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb); 633 634 return 0; 635 } 636 637 static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits) 638 { 639 unsigned long irqflags; 640 struct switchtec_ntb *sndev = ntb_sndev(ntb); 641 642 if (db_bits & ~sndev->db_valid_mask) 643 return -EINVAL; 644 645 spin_lock_irqsave(&sndev->db_mask_lock, irqflags); 646 647 sndev->db_mask |= db_bits << sndev->db_shift; 648 iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask); 649 650 spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags); 651 652 return 0; 653 } 654 655 static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) 656 { 657 unsigned long irqflags; 658 struct switchtec_ntb *sndev = ntb_sndev(ntb); 659 660 if (db_bits & ~sndev->db_valid_mask) 661 return -EINVAL; 
662 663 spin_lock_irqsave(&sndev->db_mask_lock, irqflags); 664 665 sndev->db_mask &= ~(db_bits << sndev->db_shift); 666 iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask); 667 668 spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags); 669 670 return 0; 671 } 672 673 static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb) 674 { 675 struct switchtec_ntb *sndev = ntb_sndev(ntb); 676 677 return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask; 678 } 679 680 static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb, 681 phys_addr_t *db_addr, 682 resource_size_t *db_size) 683 { 684 struct switchtec_ntb *sndev = ntb_sndev(ntb); 685 unsigned long offset; 686 687 offset = (unsigned long)sndev->mmio_peer_dbmsg->odb - 688 (unsigned long)sndev->stdev->mmio; 689 690 offset += sndev->db_shift / 8; 691 692 if (db_addr) 693 *db_addr = pci_resource_start(ntb->pdev, 0) + offset; 694 if (db_size) 695 *db_size = sizeof(u32); 696 697 return 0; 698 } 699 700 static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits) 701 { 702 struct switchtec_ntb *sndev = ntb_sndev(ntb); 703 704 iowrite64(db_bits << sndev->db_peer_shift, 705 &sndev->mmio_peer_dbmsg->odb); 706 707 return 0; 708 } 709 710 static int switchtec_ntb_spad_count(struct ntb_dev *ntb) 711 { 712 struct switchtec_ntb *sndev = ntb_sndev(ntb); 713 714 return ARRAY_SIZE(sndev->self_shared->spad); 715 } 716 717 static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx) 718 { 719 struct switchtec_ntb *sndev = ntb_sndev(ntb); 720 721 if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad)) 722 return 0; 723 724 if (!sndev->self_shared) 725 return 0; 726 727 return sndev->self_shared->spad[idx]; 728 } 729 730 static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val) 731 { 732 struct switchtec_ntb *sndev = ntb_sndev(ntb); 733 734 if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad)) 735 return -EINVAL; 736 737 if (!sndev->self_shared) 738 return -EIO; 739 740 
sndev->self_shared->spad[idx] = val; 741 742 return 0; 743 } 744 745 static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, 746 int sidx) 747 { 748 struct switchtec_ntb *sndev = ntb_sndev(ntb); 749 750 if (pidx != NTB_DEF_PEER_IDX) 751 return -EINVAL; 752 753 if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad)) 754 return 0; 755 756 if (!sndev->peer_shared) 757 return 0; 758 759 return ioread32(&sndev->peer_shared->spad[sidx]); 760 } 761 762 static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, 763 int sidx, u32 val) 764 { 765 struct switchtec_ntb *sndev = ntb_sndev(ntb); 766 767 if (pidx != NTB_DEF_PEER_IDX) 768 return -EINVAL; 769 770 if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad)) 771 return -EINVAL; 772 773 if (!sndev->peer_shared) 774 return -EIO; 775 776 iowrite32(val, &sndev->peer_shared->spad[sidx]); 777 778 return 0; 779 } 780 781 static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, 782 int sidx, phys_addr_t *spad_addr) 783 { 784 struct switchtec_ntb *sndev = ntb_sndev(ntb); 785 unsigned long offset; 786 787 if (pidx != NTB_DEF_PEER_IDX) 788 return -EINVAL; 789 790 offset = (unsigned long)&sndev->peer_shared->spad[sidx] - 791 (unsigned long)sndev->stdev->mmio; 792 793 if (spad_addr) 794 *spad_addr = pci_resource_start(ntb->pdev, 0) + offset; 795 796 return 0; 797 } 798 799 static const struct ntb_dev_ops switchtec_ntb_ops = { 800 .mw_count = switchtec_ntb_mw_count, 801 .mw_get_align = switchtec_ntb_mw_get_align, 802 .mw_set_trans = switchtec_ntb_mw_set_trans, 803 .peer_mw_count = switchtec_ntb_peer_mw_count, 804 .peer_mw_get_addr = switchtec_ntb_peer_mw_get_addr, 805 .link_is_up = switchtec_ntb_link_is_up, 806 .link_enable = switchtec_ntb_link_enable, 807 .link_disable = switchtec_ntb_link_disable, 808 .db_valid_mask = switchtec_ntb_db_valid_mask, 809 .db_vector_count = switchtec_ntb_db_vector_count, 810 .db_vector_mask = switchtec_ntb_db_vector_mask, 811 .db_read = 
switchtec_ntb_db_read, 812 .db_clear = switchtec_ntb_db_clear, 813 .db_set_mask = switchtec_ntb_db_set_mask, 814 .db_clear_mask = switchtec_ntb_db_clear_mask, 815 .db_read_mask = switchtec_ntb_db_read_mask, 816 .peer_db_addr = switchtec_ntb_peer_db_addr, 817 .peer_db_set = switchtec_ntb_peer_db_set, 818 .spad_count = switchtec_ntb_spad_count, 819 .spad_read = switchtec_ntb_spad_read, 820 .spad_write = switchtec_ntb_spad_write, 821 .peer_spad_read = switchtec_ntb_peer_spad_read, 822 .peer_spad_write = switchtec_ntb_peer_spad_write, 823 .peer_spad_addr = switchtec_ntb_peer_spad_addr, 824 }; 825 826 static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev) 827 { 828 u64 tpart_vec; 829 int self; 830 u64 part_map; 831 int bit; 832 833 sndev->ntb.pdev = sndev->stdev->pdev; 834 sndev->ntb.topo = NTB_TOPO_SWITCH; 835 sndev->ntb.ops = &switchtec_ntb_ops; 836 837 INIT_WORK(&sndev->link_reinit_work, link_reinit_work); 838 839 sndev->self_partition = sndev->stdev->partition; 840 841 sndev->mmio_ntb = sndev->stdev->mmio_ntb; 842 843 self = sndev->self_partition; 844 tpart_vec = ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_high); 845 tpart_vec <<= 32; 846 tpart_vec |= ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_low); 847 848 part_map = ioread64(&sndev->mmio_ntb->ep_map); 849 part_map &= ~(1 << sndev->self_partition); 850 851 if (!ffs(tpart_vec)) { 852 if (sndev->stdev->partition_count != 2) { 853 dev_err(&sndev->stdev->dev, 854 "ntb target partition not defined\n"); 855 return -ENODEV; 856 } 857 858 bit = ffs(part_map); 859 if (!bit) { 860 dev_err(&sndev->stdev->dev, 861 "peer partition is not NT partition\n"); 862 return -ENODEV; 863 } 864 865 sndev->peer_partition = bit - 1; 866 } else { 867 if (ffs(tpart_vec) != fls(tpart_vec)) { 868 dev_err(&sndev->stdev->dev, 869 "ntb driver only supports 1 pair of 1-1 ntb mapping\n"); 870 return -ENODEV; 871 } 872 873 sndev->peer_partition = ffs(tpart_vec) - 1; 874 if (!(part_map & (1 << sndev->peer_partition))) { 
875 dev_err(&sndev->stdev->dev, 876 "ntb target partition is not NT partition\n"); 877 return -ENODEV; 878 } 879 } 880 881 dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d\n", 882 sndev->self_partition, sndev->stdev->partition_count); 883 884 sndev->mmio_ctrl = (void * __iomem)sndev->mmio_ntb + 885 SWITCHTEC_NTB_REG_CTRL_OFFSET; 886 sndev->mmio_dbmsg = (void * __iomem)sndev->mmio_ntb + 887 SWITCHTEC_NTB_REG_DBMSG_OFFSET; 888 889 sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition]; 890 sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition]; 891 sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition]; 892 sndev->mmio_peer_dbmsg = sndev->mmio_self_dbmsg; 893 894 return 0; 895 } 896 897 static int config_rsvd_lut_win(struct switchtec_ntb *sndev, 898 struct ntb_ctrl_regs __iomem *ctl, 899 int lut_idx, int partition, u64 addr) 900 { 901 int peer_bar = sndev->peer_direct_mw_to_bar[0]; 902 u32 ctl_val; 903 int rc; 904 905 rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK, 906 NTB_CTRL_PART_STATUS_LOCKED); 907 if (rc) 908 return rc; 909 910 ctl_val = ioread32(&ctl->bar_entry[peer_bar].ctl); 911 ctl_val &= 0xFF; 912 ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN; 913 ctl_val |= ilog2(LUT_SIZE) << 8; 914 ctl_val |= (sndev->nr_lut_mw - 1) << 14; 915 iowrite32(ctl_val, &ctl->bar_entry[peer_bar].ctl); 916 917 iowrite64((NTB_CTRL_LUT_EN | (partition << 1) | addr), 918 &ctl->lut_entry[lut_idx]); 919 920 rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG, 921 NTB_CTRL_PART_STATUS_NORMAL); 922 if (rc) { 923 u32 bar_error, lut_error; 924 925 bar_error = ioread32(&ctl->bar_error); 926 lut_error = ioread32(&ctl->lut_error); 927 dev_err(&sndev->stdev->dev, 928 "Error setting up reserved lut window: %08x / %08x\n", 929 bar_error, lut_error); 930 return rc; 931 } 932 933 return 0; 934 } 935 936 static int config_req_id_table(struct switchtec_ntb *sndev, 937 struct ntb_ctrl_regs __iomem *mmio_ctrl, 938 int *req_ids, int count) 939 { 940 int i, rc 
= 0; 941 u32 error; 942 u32 proxy_id; 943 944 if (ioread32(&mmio_ctrl->req_id_table_size) < count) { 945 dev_err(&sndev->stdev->dev, 946 "Not enough requester IDs available.\n"); 947 return -EFAULT; 948 } 949 950 rc = switchtec_ntb_part_op(sndev, mmio_ctrl, 951 NTB_CTRL_PART_OP_LOCK, 952 NTB_CTRL_PART_STATUS_LOCKED); 953 if (rc) 954 return rc; 955 956 iowrite32(NTB_PART_CTRL_ID_PROT_DIS, 957 &mmio_ctrl->partition_ctrl); 958 959 for (i = 0; i < count; i++) { 960 iowrite32(req_ids[i] << 16 | NTB_CTRL_REQ_ID_EN, 961 &mmio_ctrl->req_id_table[i]); 962 963 proxy_id = ioread32(&mmio_ctrl->req_id_table[i]); 964 dev_dbg(&sndev->stdev->dev, 965 "Requester ID %02X:%02X.%X -> BB:%02X.%X\n", 966 req_ids[i] >> 8, (req_ids[i] >> 3) & 0x1F, 967 req_ids[i] & 0x7, (proxy_id >> 4) & 0x1F, 968 (proxy_id >> 1) & 0x7); 969 } 970 971 rc = switchtec_ntb_part_op(sndev, mmio_ctrl, 972 NTB_CTRL_PART_OP_CFG, 973 NTB_CTRL_PART_STATUS_NORMAL); 974 975 if (rc == -EIO) { 976 error = ioread32(&mmio_ctrl->req_id_error); 977 dev_err(&sndev->stdev->dev, 978 "Error setting up the requester ID table: %08x\n", 979 error); 980 } 981 982 return 0; 983 } 984 985 static int crosslink_setup_mws(struct switchtec_ntb *sndev, int ntb_lut_idx, 986 u64 *mw_addrs, int mw_count) 987 { 988 int rc, i; 989 struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_self_ctrl; 990 u64 addr; 991 size_t size, offset; 992 int bar; 993 int xlate_pos; 994 u32 ctl_val; 995 996 rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK, 997 NTB_CTRL_PART_STATUS_LOCKED); 998 if (rc) 999 return rc; 1000 1001 for (i = 0; i < sndev->nr_lut_mw; i++) { 1002 if (i == ntb_lut_idx) 1003 continue; 1004 1005 addr = mw_addrs[0] + LUT_SIZE * i; 1006 1007 iowrite64((NTB_CTRL_LUT_EN | (sndev->peer_partition << 1) | 1008 addr), 1009 &ctl->lut_entry[i]); 1010 } 1011 1012 sndev->nr_direct_mw = min_t(int, sndev->nr_direct_mw, mw_count); 1013 1014 for (i = 0; i < sndev->nr_direct_mw; i++) { 1015 bar = sndev->direct_mw_to_bar[i]; 1016 offset = (i == 0) ? 
LUT_SIZE * sndev->nr_lut_mw : 0;
		addr = mw_addrs[i] + offset;
		size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
		xlate_pos = ilog2(size);

		if (offset && size > offset)
			size = offset;

		ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
		ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;

		iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
		iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
		iowrite64(sndev->peer_partition | addr,
			  &ctl->bar_entry[bar].xlate_addr);
	}

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);
	if (rc) {
		u32 bar_error, lut_error;

		bar_error = ioread32(&ctl->bar_error);
		lut_error = ioread32(&ctl->lut_error);
		dev_err(&sndev->stdev->dev,
			"Error setting up cross link windows: %08x / %08x\n",
			bar_error, lut_error);
		return rc;
	}

	return 0;
}

/*
 * Copy the proxy requester IDs already programmed in our own control
 * block into the crosslink-side table, so our traffic is accepted on
 * the far side. Stops at the first disabled entry.
 */
static int crosslink_setup_req_ids(struct switchtec_ntb *sndev,
				   struct ntb_ctrl_regs __iomem *mmio_ctrl)
{
	int req_ids[16];
	int i;
	u32 proxy_id;

	for (i = 0; i < ARRAY_SIZE(req_ids); i++) {
		proxy_id = ioread32(&sndev->mmio_self_ctrl->req_id_table[i]);

		if (!(proxy_id & NTB_CTRL_REQ_ID_EN))
			break;

		req_ids[i] = ((proxy_id >> 1) & 0xFF);
	}

	return config_req_id_table(sndev, mmio_ctrl, req_ids, i);
}

/*
 * In crosslink configuration there is a virtual partition in the
 * middle of the two switches. The BARs in this partition have to be
 * enumerated and assigned addresses.
 */
static int crosslink_enum_partition(struct switchtec_ntb *sndev,
				    u64 *bar_addrs)
{
	struct part_cfg_regs __iomem *part_cfg =
		&sndev->stdev->mmio_part_cfg_all[sndev->peer_partition];
	u32 pff = ioread32(&part_cfg->vep_pff_inst_id);
	struct pff_csr_regs __iomem *mmio_pff =
		&sndev->stdev->mmio_pff_csr[pff];
	const u64 bar_space = 0x1000000000LL;
	u64 bar_addr;
	int bar_cnt = 0;
	int i;

	/* enable memory space + bus mastering on the virtual EP */
	iowrite16(0x6, &mmio_pff->pcicmd);

	for (i = 0; i < ARRAY_SIZE(mmio_pff->pci_bar64); i++) {
		/* assign each 64-bit BAR a 64GB-spaced address */
		iowrite64(bar_space * i, &mmio_pff->pci_bar64[i]);
		bar_addr = ioread64(&mmio_pff->pci_bar64[i]);
		bar_addr &= ~0xf;

		dev_dbg(&sndev->stdev->dev,
			"Crosslink BAR%d addr: %llx\n",
			i, bar_addr);

		/* a BAR that didn't latch the address is not implemented */
		if (bar_addr != bar_space * i)
			continue;

		bar_addrs[bar_cnt++] = bar_addr;
	}

	return bar_cnt;
}

/*
 * Full crosslink bring-up: enumerate the virtual partition's BARs,
 * reserve a LUT pointing at the far side's dbmsg registers, program
 * the crosslink memory windows and requester IDs, and map the LUT
 * window so mmio_peer_dbmsg points across the link. No-op when
 * crosslink is not enabled.
 */
static int switchtec_ntb_init_crosslink(struct switchtec_ntb *sndev)
{
	int rc;
	int bar = sndev->direct_mw_to_bar[0];
	const int ntb_lut_idx = 1;
	u64 bar_addrs[6];
	u64 addr;
	int offset;
	int bar_cnt;

	if (!crosslink_is_enabled(sndev))
		return 0;

	dev_info(&sndev->stdev->dev, "Using crosslink configuration\n");
	sndev->ntb.topo = NTB_TOPO_CROSSLINK;

	bar_cnt = crosslink_enum_partition(sndev, bar_addrs);
	if (bar_cnt < sndev->nr_direct_mw + 1) {
		dev_err(&sndev->stdev->dev,
			"Error enumerating crosslink partition\n");
		return -EINVAL;
	}

	/* address of the far side's dbmsg block within crosslink BAR 0 */
	addr = (bar_addrs[0] + SWITCHTEC_GAS_NTB_OFFSET +
		SWITCHTEC_NTB_REG_DBMSG_OFFSET +
		sizeof(struct ntb_dbmsg_regs) * sndev->peer_partition);

	/* LUT entries must be LUT_SIZE aligned; keep the residue as offset */
	offset = addr & (LUT_SIZE - 1);
	addr -= offset;

	rc = config_rsvd_lut_win(sndev, sndev->mmio_self_ctrl, ntb_lut_idx,
				 sndev->peer_partition, addr);
	if (rc)
		return rc;

	rc = crosslink_setup_mws(sndev, ntb_lut_idx, &bar_addrs[1],
				 bar_cnt - 1);
	if (rc)
		return rc;

	rc = crosslink_setup_req_ids(sndev, sndev->mmio_peer_ctrl);
	if (rc)
		return rc;

	/* map LUT slot 1 (offset LUT_SIZE within the BAR) */
	sndev->mmio_xlink_win = pci_iomap_range(sndev->stdev->pdev, bar,
						LUT_SIZE, LUT_SIZE);
	if (!sndev->mmio_xlink_win) {
		rc = -ENOMEM;
		return rc;
	}

	sndev->mmio_peer_dbmsg = sndev->mmio_xlink_win + offset;
	sndev->nr_rsvd_luts++;

	crosslink_init_dbmsgs(sndev);

	return 0;
}

/* Undo the pci_iomap_range() from switchtec_ntb_init_crosslink() */
static void switchtec_ntb_deinit_crosslink(struct switchtec_ntb *sndev)
{
	if (sndev->mmio_xlink_win)
		pci_iounmap(sndev->stdev->pdev, sndev->mmio_xlink_win);
}

/*
 * Build the window-index -> BAR-number table from the valid bits in
 * the BAR setup registers; returns the number of usable BARs.
 */
static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
{
	int i;
	int cnt = 0;

	for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) {
		u32 r = ioread32(&ctrl->bar_entry[i].ctl);

		if (r & NTB_CTRL_BAR_VALID)
			map[cnt++] = i;
	}

	return cnt;
}

/*
 * Discover how many direct and LUT windows each side has. LUT counts
 * are rounded down to a power of two as the hardware requires.
 */
static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
{
	sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar,
				       sndev->mmio_self_ctrl);

	sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
	sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);

	dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut\n",
		sndev->nr_direct_mw, sndev->nr_lut_mw);

	sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
					    sndev->mmio_peer_ctrl);

	sndev->peer_nr_lut_mw =
		ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
	sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);

	dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut\n",
		sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);

}

/*
 * There are 64 doorbells in the switch hardware but this is
 * shared among all partitions. So we must split them in half
 * (32 for each partition). However, the message interrupts are
 * also shared with the top 4 doorbells so we just limit this to
 * 28 doorbells per partition.
 *
 * In crosslink mode, each side has it's own dbmsg register so
 * they can each use all 60 of the available doorbells.
 */
static void switchtec_ntb_init_db(struct switchtec_ntb *sndev)
{
	sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;

	if (sndev->mmio_peer_dbmsg != sndev->mmio_self_dbmsg) {
		/* crosslink: separate registers, no need to split */
		sndev->db_shift = 0;
		sndev->db_peer_shift = 0;
		sndev->db_valid_mask = sndev->db_mask;
	} else if (sndev->self_partition < sndev->peer_partition) {
		/* lower-numbered partition takes the low half */
		sndev->db_shift = 0;
		sndev->db_peer_shift = 32;
		sndev->db_valid_mask = 0x0FFFFFFF;
	} else {
		sndev->db_shift = 32;
		sndev->db_peer_shift = 0;
		sndev->db_valid_mask = 0x0FFFFFFF;
	}

	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
	iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
		  &sndev->mmio_peer_dbmsg->odb_mask);

	dev_dbg(&sndev->stdev->dev, "dbs: shift %d/%d, mask %016llx\n",
		sndev->db_shift, sndev->db_peer_shift, sndev->db_valid_mask);
}

/*
 * Route each inbound message register to the peer partition and
 * leave all messages masked with their status bits cleared.
 */
static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
{
	int i;
	u32 msg_map = 0;

	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
		int m = i | sndev->peer_partition << 2;

		msg_map |= m << i * 8;
	}

	iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map);

	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++)
		iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK,
			  &sndev->mmio_self_dbmsg->imsg[i]);
}

/* Allow DMA from the root complex and this host's bridge through the NT EP */
static int
switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
{
	int req_ids[2];

	/*
	 * Root Complex Requester ID (which is 0:00.0)
	 */
	req_ids[0] = 0;

	/*
	 * Host Bridge Requester ID (as read from the mmap address)
	 */
	req_ids[1] = ioread16(&sndev->mmio_ntb->requester_id);

	return config_req_id_table(sndev, sndev->mmio_self_ctrl, req_ids,
				   ARRAY_SIZE(req_ids));
}

/*
 * Initialize the contents of our shared page: magic, partition id,
 * and the per-window sizes the peer will read in mw_get_align().
 */
static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
{
	int i;

	memset(sndev->self_shared, 0, LUT_SIZE);
	sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC;
	sndev->self_shared->partition_id = sndev->stdev->partition;

	for (i = 0; i < sndev->nr_direct_mw; i++) {
		int bar = sndev->direct_mw_to_bar[i];
		resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar);

		/* window 0 is limited to the space carved out by the LUTs */
		if (i == 0)
			sz = min_t(resource_size_t, sz,
				   LUT_SIZE * sndev->nr_lut_mw);

		sndev->self_shared->mw_sizes[i] = sz;
	}

	for (i = 0; i < sndev->nr_lut_mw; i++) {
		int idx = sndev->nr_direct_mw + i;

		sndev->self_shared->mw_sizes[idx] = LUT_SIZE;
	}
}

/*
 * Allocate our DMA-coherent shared page, expose it to the peer via
 * reserved LUT 0, and map the peer's shared page through our own
 * BAR. (Definition continues beyond the visible end of this chunk.)
 */
static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
{
	int self_bar = sndev->direct_mw_to_bar[0];
	int rc;

	sndev->nr_rsvd_luts++;
	sndev->self_shared = dma_alloc_coherent(&sndev->stdev->pdev->dev,
						LUT_SIZE,
						&sndev->self_shared_dma,
						GFP_KERNEL);
	if (!sndev->self_shared) {
		dev_err(&sndev->stdev->dev,
			"unable to allocate memory for shared mw\n");
		return -ENOMEM;
	}

	switchtec_ntb_init_shared(sndev);

	rc = config_rsvd_lut_win(sndev, sndev->mmio_peer_ctrl, 0,
				 sndev->self_partition,
				 sndev->self_shared_dma);
	if (rc)
		goto unalloc_and_exit;

	sndev->peer_shared = pci_iomap(sndev->stdev->pdev, self_bar, LUT_SIZE);
	if (!sndev->peer_shared) {
		rc = -ENOMEM;
		goto unalloc_and_exit;
	}

	dev_dbg(&sndev->stdev->dev, "Shared MW Ready\n");
	return 0;

unalloc_and_exit:
	dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
			  sndev->self_shared,
sndev->self_shared_dma); 1344 1345 return rc; 1346 } 1347 1348 static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev) 1349 { 1350 if (sndev->peer_shared) 1351 pci_iounmap(sndev->stdev->pdev, sndev->peer_shared); 1352 1353 if (sndev->self_shared) 1354 dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE, 1355 sndev->self_shared, 1356 sndev->self_shared_dma); 1357 sndev->nr_rsvd_luts--; 1358 } 1359 1360 static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev) 1361 { 1362 struct switchtec_ntb *sndev = dev; 1363 1364 dev_dbg(&sndev->stdev->dev, "doorbell\n"); 1365 1366 ntb_db_event(&sndev->ntb, 0); 1367 1368 return IRQ_HANDLED; 1369 } 1370 1371 static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev) 1372 { 1373 int i; 1374 struct switchtec_ntb *sndev = dev; 1375 1376 for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) { 1377 u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]); 1378 1379 if (msg & NTB_DBMSG_IMSG_STATUS) { 1380 dev_dbg(&sndev->stdev->dev, "message: %d %08x\n", 1381 i, (u32)msg); 1382 iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status); 1383 1384 if (i == LINK_MESSAGE) 1385 switchtec_ntb_check_link(sndev, msg); 1386 } 1387 } 1388 1389 return IRQ_HANDLED; 1390 } 1391 1392 static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev) 1393 { 1394 int i; 1395 int rc; 1396 int doorbell_irq = 0; 1397 int message_irq = 0; 1398 int event_irq; 1399 int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map); 1400 1401 event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number); 1402 1403 while (doorbell_irq == event_irq) 1404 doorbell_irq++; 1405 while (message_irq == doorbell_irq || 1406 message_irq == event_irq) 1407 message_irq++; 1408 1409 dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d\n", 1410 event_irq, doorbell_irq, message_irq); 1411 1412 for (i = 0; i < idb_vecs - 4; i++) 1413 iowrite8(doorbell_irq, 1414 &sndev->mmio_self_dbmsg->idb_vec_map[i]); 1415 1416 for (; i < 
idb_vecs; i++) 1417 iowrite8(message_irq, 1418 &sndev->mmio_self_dbmsg->idb_vec_map[i]); 1419 1420 sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq); 1421 sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq); 1422 1423 rc = request_irq(sndev->doorbell_irq, 1424 switchtec_ntb_doorbell_isr, 0, 1425 "switchtec_ntb_doorbell", sndev); 1426 if (rc) 1427 return rc; 1428 1429 rc = request_irq(sndev->message_irq, 1430 switchtec_ntb_message_isr, 0, 1431 "switchtec_ntb_message", sndev); 1432 if (rc) { 1433 free_irq(sndev->doorbell_irq, sndev); 1434 return rc; 1435 } 1436 1437 return 0; 1438 } 1439 1440 static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev) 1441 { 1442 free_irq(sndev->doorbell_irq, sndev); 1443 free_irq(sndev->message_irq, sndev); 1444 } 1445 1446 static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev) 1447 { 1448 dev_info(&sndev->stdev->dev, "peer reinitialized\n"); 1449 switchtec_ntb_deinit_shared_mw(sndev); 1450 switchtec_ntb_init_mw(sndev); 1451 return switchtec_ntb_init_shared_mw(sndev); 1452 } 1453 1454 static int switchtec_ntb_add(struct device *dev, 1455 struct class_interface *class_intf) 1456 { 1457 struct switchtec_dev *stdev = to_stdev(dev); 1458 struct switchtec_ntb *sndev; 1459 int rc; 1460 1461 stdev->sndev = NULL; 1462 1463 if (stdev->pdev->class != (PCI_CLASS_BRIDGE_OTHER << 8)) 1464 return -ENODEV; 1465 1466 sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev)); 1467 if (!sndev) 1468 return -ENOMEM; 1469 1470 sndev->stdev = stdev; 1471 rc = switchtec_ntb_init_sndev(sndev); 1472 if (rc) 1473 goto free_and_exit; 1474 1475 switchtec_ntb_init_mw(sndev); 1476 1477 rc = switchtec_ntb_init_req_id_table(sndev); 1478 if (rc) 1479 goto free_and_exit; 1480 1481 rc = switchtec_ntb_init_crosslink(sndev); 1482 if (rc) 1483 goto free_and_exit; 1484 1485 switchtec_ntb_init_db(sndev); 1486 switchtec_ntb_init_msgs(sndev); 1487 1488 rc = switchtec_ntb_init_shared_mw(sndev); 1489 if 
(rc) 1490 goto deinit_crosslink; 1491 1492 rc = switchtec_ntb_init_db_msg_irq(sndev); 1493 if (rc) 1494 goto deinit_shared_and_exit; 1495 1496 /* 1497 * If this host crashed, the other host may think the link is 1498 * still up. Tell them to force it down (it will go back up 1499 * once we register the ntb device). 1500 */ 1501 switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_FORCE_DOWN); 1502 1503 rc = ntb_register_device(&sndev->ntb); 1504 if (rc) 1505 goto deinit_and_exit; 1506 1507 stdev->sndev = sndev; 1508 stdev->link_notifier = switchtec_ntb_link_notification; 1509 dev_info(dev, "NTB device registered\n"); 1510 1511 return 0; 1512 1513 deinit_and_exit: 1514 switchtec_ntb_deinit_db_msg_irq(sndev); 1515 deinit_shared_and_exit: 1516 switchtec_ntb_deinit_shared_mw(sndev); 1517 deinit_crosslink: 1518 switchtec_ntb_deinit_crosslink(sndev); 1519 free_and_exit: 1520 kfree(sndev); 1521 dev_err(dev, "failed to register ntb device: %d\n", rc); 1522 return rc; 1523 } 1524 1525 static void switchtec_ntb_remove(struct device *dev, 1526 struct class_interface *class_intf) 1527 { 1528 struct switchtec_dev *stdev = to_stdev(dev); 1529 struct switchtec_ntb *sndev = stdev->sndev; 1530 1531 if (!sndev) 1532 return; 1533 1534 stdev->link_notifier = NULL; 1535 stdev->sndev = NULL; 1536 ntb_unregister_device(&sndev->ntb); 1537 switchtec_ntb_deinit_db_msg_irq(sndev); 1538 switchtec_ntb_deinit_shared_mw(sndev); 1539 switchtec_ntb_deinit_crosslink(sndev); 1540 kfree(sndev); 1541 dev_info(dev, "ntb device unregistered\n"); 1542 } 1543 1544 static struct class_interface switchtec_interface = { 1545 .add_dev = switchtec_ntb_add, 1546 .remove_dev = switchtec_ntb_remove, 1547 }; 1548 1549 static int __init switchtec_ntb_init(void) 1550 { 1551 switchtec_interface.class = switchtec_class; 1552 return class_interface_register(&switchtec_interface); 1553 } 1554 module_init(switchtec_ntb_init); 1555 1556 static void __exit switchtec_ntb_exit(void) 1557 { 1558 
class_interface_unregister(&switchtec_interface); 1559 } 1560 module_exit(switchtec_ntb_exit); 1561