/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Transport Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "linux/ntb.h"
#include "linux/ntb_transport.h"

#define NTB_TRANSPORT_VERSION	4
#define NTB_TRANSPORT_VER	"4"
#define NTB_TRANSPORT_NAME	"ntb_transport"
#define NTB_TRANSPORT_DESC	"Software Queue-Pair Transport over NTB"

MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
MODULE_VERSION(NTB_TRANSPORT_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");

static unsigned int transport_mtu = 0x401E;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");

static struct dentry *nt_debugfs_dir;

struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;

	struct ntb_transport_qp *qp;
	union {
		struct ntb_payload_header __iomem *tx_hdr;
		struct ntb_payload_header *rx_hdr;
	};
	unsigned int index;
};

struct ntb_rx_info {
	unsigned int entry;
};
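/*
 * Illustrative note (added for clarity; derived from
 * ntb_transport_setup_qp_mw() and ntb_transport_init_queue() below, not
 * from the original sources): each queue pair carves one slice out of a
 * memory window, laid out roughly as
 *
 *	+---------------------+---------------------+-----+-------------+
 *	| frame 0  ...  hdr 0 | frame 1  ...  hdr 1 | ... | ntb_rx_info |
 *	+---------------------+---------------------+-----+-------------+
 *
 * Each frame ends with a struct ntb_payload_header, and the slice ends
 * with a struct ntb_rx_info through which the receiver publishes its
 * last-consumed entry index back to the transmitter for flow control.
 */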
struct ntb_transport_qp {
	struct ntb_transport_ctx *transport;
	struct ntb_dev *ndev;
	void *cb_data;
	struct dma_chan *dma_chan;

	bool client_ready;
	bool link_is_up;

	u8 qp_num;	/* Only 64 QPs are allowed.  0-63 */
	u64 qp_bit;

	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void __iomem *tx_mw;
	dma_addr_t tx_mw_phys;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	spinlock_t ntb_rx_pend_q_lock;
	spinlock_t ntb_rx_free_q_lock;
	void *rx_buff;
	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;
	dma_cookie_t last_cookie;
	struct tasklet_struct rxc_db_work;

	void (*event_handler)(void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 rx_memcpy;
	u64 rx_async;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
	u64 tx_err_no_buf;
	u64 tx_memcpy;
	u64 tx_async;
};

struct ntb_transport_mw {
	phys_addr_t phys_addr;
	resource_size_t phys_size;
	resource_size_t xlat_align;
	resource_size_t xlat_align_size;
	void __iomem *vbase;
	size_t xlat_size;
	size_t buff_size;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
	struct list_head entry;
	struct ntb_transport_ctx *nt;
	struct device dev;
};

struct ntb_transport_ctx {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_dev *ndev;

	struct ntb_transport_mw *mw_vec;
	struct ntb_transport_qp *qp_vec;
	unsigned int mw_count;
	unsigned int qp_count;
	u64 qp_bitmap;
	u64 qp_bitmap_free;

	bool link_is_up;
	struct delayed_work link_work;
	struct work_struct link_cleanup;
};

enum {
	DESC_DONE_FLAG = BIT(0),
	LINK_DOWN_FLAG = BIT(1),
};

struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};

enum {
	VERSION = 0,
	QP_LINKS,
	NUM_QPS,
	NUM_MWS,
	MW0_SZ_HIGH,
	MW0_SZ_LOW,
	MW1_SZ_HIGH,
	MW1_SZ_LOW,
	MAX_SPAD,
};

#define dev_client_dev(__dev) \
	container_of((__dev), struct ntb_transport_client_dev, dev)

#define drv_client(__drv) \
	container_of((__drv), struct ntb_transport_client, driver)

#define QP_TO_MW(nt, qp)	((qp) % (nt)->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10

static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client;

static int ntb_transport_bus_match(struct device *dev,
				   struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_transport_bus_probe(struct device *dev)
{
	const struct ntb_transport_client *client;
	int rc = -EINVAL;

	get_device(dev);

	client = drv_client(dev->driver);
	rc = client->probe(dev);
	if (rc)
		put_device(dev);

	return rc;
}

static int ntb_transport_bus_remove(struct device *dev)
{
	const struct ntb_transport_client *client;

	client = drv_client(dev->driver);
	client->remove(dev);

	put_device(dev);

	return 0;
}

static struct bus_type ntb_transport_bus = {
	.name = "ntb_transport",
	.match = ntb_transport_bus_match,
	.probe = ntb_transport_bus_probe,
	.remove = ntb_transport_bus_remove,
};

static LIST_HEAD(ntb_transport_list);

static int ntb_bus_init(struct ntb_transport_ctx *nt)
{
	list_add(&nt->entry, &ntb_transport_list);
	return 0;
}

static void ntb_bus_remove(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);
}

static void ntb_transport_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;

	client_dev = dev_client_dev(dev);
	kfree(client_dev);
}

/**
 * ntb_transport_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_transport_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport_ctx *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);

/**
 * ntb_transport_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_transport_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport_ctx *nt;
	int rc, i = 0;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	list_for_each_entry(nt, &ntb_transport_list, entry) {
		struct device *dev;

		client_dev = kzalloc(sizeof(*client_dev),
				     GFP_KERNEL);
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s%d", device_name, i);
		dev->bus = &ntb_transport_bus;
		dev->release = ntb_transport_client_release;
		dev->parent = &nt->ndev->dev;

		rc = device_register(dev);
		if (rc) {
			kfree(client_dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
		i++;
	}

	return 0;

err:
	ntb_transport_unregister_client_dev(device_name);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);

/**
 * ntb_transport_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_register_client(struct ntb_transport_client *drv)
{
	drv->driver.bus = &ntb_transport_bus;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client);

/**
 * ntb_transport_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 */
void ntb_transport_unregister_client(struct ntb_transport_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);

static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 1000;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	qp = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "NTB QP stats\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_async - \t%llu\n", qp->rx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff - \t%p\n", qp->rx_buff);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_index - \t%u\n", qp->rx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_max_entry - \t%u\n", qp->rx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_async - \t%llu\n", qp->tx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw - \t%p\n", qp->tx_mw);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_index - \t%u\n", qp->tx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_max_entry - \t%u\n", qp->tx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\nQP Link %s\n",
			       qp->link_is_up ? "Up" : "Down");
	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_read,
};

static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (list_empty(list)) {
		entry = NULL;
		goto out;
	}
	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_del(&entry->entry);
out:
	spin_unlock_irqrestore(lock, flags);

	return entry;
}

static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
				     unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
	struct ntb_transport_mw *mw;
	unsigned int rx_size, num_qps_mw;
	unsigned int mw_num, mw_count, qp_count;
	unsigned int i;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	if (!mw->virt_addr)
		return -ENOMEM;

	/* The first (qp_count % mw_count) windows host one extra queue */
	if (mw_num < qp_count % mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
	qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
	rx_size -= sizeof(struct ntb_rx_info);

	qp->remote_rx_info = qp->rx_buff + rx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;

	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* setup the hdr offsets with 0's */
	for (i = 0; i < qp->rx_max_entry; i++) {
		void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
				sizeof(struct ntb_payload_header));
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
	qp->tx_index = 0;

	return 0;
}
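/*
 * Worked example (illustrative numbers, not from the original source):
 * with a 64 KiB slice per queue, rx_size = 65536 - sizeof(struct
 * ntb_rx_info) = 65532.  With the default transport_mtu of 0x401E
 * (16414), rx_max_frame = min(16414, 65532 / 2) = 16414 and
 * rx_max_entry = 65532 / 16414 = 3, i.e. a three-entry receive ring
 * with the remainder of the slice left unused.
 */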
static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;

	if (!mw->virt_addr)
		return;

	ntb_mw_clear_trans(nt->ndev, num_mw);
	dma_free_coherent(&pdev->dev, mw->buff_size,
			  mw->virt_addr, mw->dma_addr);
	mw->xlat_size = 0;
	mw->buff_size = 0;
	mw->virt_addr = NULL;
}

static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
		      unsigned int size)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;
	unsigned int xlat_size, buff_size;
	int rc;

	xlat_size = round_up(size, mw->xlat_align_size);
	buff_size = round_up(size, mw->xlat_align);

	/* No need to re-setup */
	if (mw->xlat_size == xlat_size)
		return 0;

	if (mw->buff_size)
		ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be aligned */
	mw->xlat_size = xlat_size;
	mw->buff_size = buff_size;

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size,
					   &mw->dma_addr, GFP_KERNEL);
	if (!mw->virt_addr) {
		mw->xlat_size = 0;
		mw->buff_size = 0;
		dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n",
			buff_size);
		return -ENOMEM;
	}

	/*
	 * we must ensure that the memory address allocated is BAR size
	 * aligned in order for the XLAT register to take the value. This
	 * is a requirement of the hardware. It is recommended to setup CMA
	 * for BAR sizes equal or greater than 4MB.
	 */
	if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
		dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
			&mw->dma_addr);
		ntb_free_mw(nt, num_mw);
		return -ENOMEM;
	}

	/* Notify HW the memory location of the receive buffer */
	rc = ntb_mw_set_trans(nt->ndev, num_mw, mw->dma_addr, mw->xlat_size);
	if (rc) {
		dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw);
		ntb_free_mw(nt, num_mw);
		return -EIO;
	}

	return 0;
}

static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
{
	qp->link_is_up = false;

	qp->tx_index = 0;
	qp->rx_index = 0;
	qp->rx_bytes = 0;
	qp->rx_pkts = 0;
	qp->rx_ring_empty = 0;
	qp->rx_err_no_buf = 0;
	qp->rx_err_oflow = 0;
	qp->rx_err_ver = 0;
	qp->rx_memcpy = 0;
	qp->rx_async = 0;
	qp->tx_bytes = 0;
	qp->tx_pkts = 0;
	qp->tx_ring_full = 0;
	qp->tx_err_no_buf = 0;
	qp->tx_memcpy = 0;
	qp->tx_async = 0;
}

static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
	struct ntb_transport_ctx *nt = qp->transport;
	struct pci_dev *pdev = nt->ndev->pdev;

	dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);

	cancel_delayed_work_sync(&qp->link_work);
	ntb_qp_link_down_reset(qp);

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, qp->link_is_up);
}

static void ntb_qp_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_cleanup);
	struct ntb_transport_ctx *nt = qp->transport;

	ntb_qp_link_cleanup(qp);

	if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	schedule_work(&qp->link_cleanup);
}

static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_qp *qp;
	u64 qp_bitmap_alloc;
	int i;

	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->qp_count; i++)
		if (qp_bitmap_alloc & BIT_ULL(i)) {
			qp = &nt->qp_vec[i];
			ntb_qp_link_cleanup(qp);
			cancel_work_sync(&qp->link_cleanup);
			cancel_delayed_work_sync(&qp->link_work);
		}

	if (!nt->link_is_up)
		cancel_delayed_work_sync(&nt->link_work);

	/* The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < MAX_SPAD; i++)
		ntb_spad_write(nt->ndev, i, 0);
}

static void ntb_transport_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_cleanup);

	ntb_transport_link_cleanup(nt);
}
static void ntb_transport_event_callback(void *data)
{
	struct ntb_transport_ctx *nt = data;

	if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work, 0);
	else
		schedule_work(&nt->link_cleanup);
}

static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_work.work);
	struct ntb_dev *ndev = nt->ndev;
	struct pci_dev *pdev = ndev->pdev;
	resource_size_t size;
	u32 val;
	int rc, i, spad;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < nt->mw_count; i++) {
		size = nt->mw_vec[i].phys_size;

		if (max_mw_size && size > max_mw_size)
			size = max_mw_size;

		spad = MW0_SZ_HIGH + (i * 2);
		ntb_peer_spad_write(ndev, spad, (u32)(size >> 32));

		spad = MW0_SZ_LOW + (i * 2);
		ntb_peer_spad_write(ndev, spad, (u32)size);
	}

	ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count);

	ntb_peer_spad_write(ndev, NUM_QPS, nt->qp_count);

	ntb_peer_spad_write(ndev, VERSION, NTB_TRANSPORT_VERSION);

	/* Query the remote side for its info */
	val = ntb_spad_read(ndev, VERSION);
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);
	if (val != NTB_TRANSPORT_VERSION)
		goto out;

	val = ntb_spad_read(ndev, NUM_QPS);
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
	if (val != nt->qp_count)
		goto out;

	val = ntb_spad_read(ndev, NUM_MWS);
	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
	if (val != nt->mw_count)
		goto out;

	for (i = 0; i < nt->mw_count; i++) {
		u64 val64;

		val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
		val64 = (u64)val << 32;

		val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2));
		val64 |= val;

		dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);

		rc = ntb_set_mw(nt, i, val64);
		if (rc)
			goto out1;
	}

	nt->link_is_up = true;

	for (i = 0; i < nt->qp_count; i++) {
		struct ntb_transport_qp *qp = &nt->qp_vec[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready)
			schedule_delayed_work(&qp->link_work, 0);
	}

	return;

out1:
	for (i = 0; i < nt->mw_count; i++)
		ntb_free_mw(nt, i);
out:
	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_work.work);
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_transport_ctx *nt = qp->transport;
	int val;

	WARN_ON(!nt->link_is_up);

	val = ntb_spad_read(nt->ndev, QP_LINKS);

	ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num));

	/* query remote spad for qp ready bits */
	ntb_peer_spad_read(nt->ndev, QP_LINKS);
	dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (val & BIT(qp->qp_num)) {
		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		qp->link_is_up = true;

		if (qp->event_handler)
			qp->event_handler(qp->cb_data, qp->link_is_up);
	} else if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
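/*
 * Scratchpad handshake, summarized for reference (a reading of
 * ntb_transport_link_work() above, not an addition to the protocol):
 * each side writes VERSION, NUM_QPS, NUM_MWS and the MW sizes into the
 * peer's scratchpads, then reads its own scratchpads for the peer's
 * values.  Only when every field matches the local configuration are
 * the memory windows programmed and the per-qp link work scheduled;
 * otherwise the work is re-queued until the peer comes up.
 */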
static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
				    unsigned int qp_num)
{
	struct ntb_transport_qp *qp;
	struct ntb_transport_mw *mw;
	phys_addr_t mw_base;
	resource_size_t mw_size;
	unsigned int num_qps_mw, tx_size;
	unsigned int mw_num, mw_count, qp_count;
	u64 qp_offset;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	qp = &nt->qp_vec[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ndev = nt->ndev;
	qp->client_ready = false;
	qp->event_handler = NULL;
	ntb_qp_link_down_reset(qp);

	/* The first (qp_count % mw_count) windows host one extra queue */
	if (mw_num < qp_count % mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	mw_base = nt->mw_vec[mw_num].phys_addr;
	mw_size = nt->mw_vec[mw_num].phys_size;

	tx_size = (unsigned int)mw_size / num_qps_mw;
	qp_offset = tx_size * (qp_num / mw_count);

	qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
	if (!qp->tx_mw)
		return -EINVAL;

	qp->tx_mw_phys = mw_base + qp_offset;
	if (!qp->tx_mw_phys)
		return -EINVAL;

	tx_size -= sizeof(struct ntb_rx_info);
	qp->rx_info = qp->tx_mw + tx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;

	if (nt_debugfs_dir) {
		/* large enough for "qp63", the highest allowed qp number */
		char debugfs_name[8];

		snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
						     nt_debugfs_dir);

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
							qp->debugfs_dir, qp,
							&ntb_qp_debugfs_stats);
	} else {
		qp->debugfs_dir = NULL;
		qp->debugfs_stats = NULL;
	}

	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);

	spin_lock_init(&qp->ntb_rx_pend_q_lock);
	spin_lock_init(&qp->ntb_rx_free_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);

	tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
		     (unsigned long)qp);

	return 0;
}

static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
{
	struct ntb_transport_ctx *nt;
	struct ntb_transport_mw *mw;
	unsigned int mw_count, qp_count;
	u64 qp_bitmap;
	int rc, i;

	if (ntb_db_is_unsafe(ndev))
		dev_dbg(&ndev->dev,
			"doorbell is unsafe, proceed anyway...\n");
	if (ntb_spad_is_unsafe(ndev))
		dev_dbg(&ndev->dev,
			"scratchpad is unsafe, proceed anyway...\n");

	nt = kzalloc(sizeof(*nt), GFP_KERNEL);
	if (!nt)
		return -ENOMEM;

	nt->ndev = ndev;

	mw_count = ntb_mw_count(ndev);

	nt->mw_count = mw_count;

	nt->mw_vec = kcalloc(mw_count, sizeof(*nt->mw_vec), GFP_KERNEL);
	if (!nt->mw_vec) {
		rc = -ENOMEM;
		goto err;
	}

	for (i = 0; i < mw_count; i++) {
		mw = &nt->mw_vec[i];

		rc = ntb_mw_get_range(ndev, i, &mw->phys_addr, &mw->phys_size,
				      &mw->xlat_align, &mw->xlat_align_size);
		if (rc)
			goto err1;

		mw->vbase = ioremap(mw->phys_addr, mw->phys_size);
		if (!mw->vbase) {
			rc = -ENOMEM;
			goto err1;
		}

		mw->buff_size = 0;
		mw->xlat_size = 0;
		mw->virt_addr = NULL;
		mw->dma_addr = 0;
	}

	qp_bitmap = ntb_db_valid_mask(ndev);

	qp_count = ilog2(qp_bitmap);
	if (max_num_clients && max_num_clients < qp_count)
		qp_count = max_num_clients;
	else if (mw_count < qp_count)
		qp_count = mw_count;

	qp_bitmap &= BIT_ULL(qp_count) - 1;

	nt->qp_count = qp_count;
	nt->qp_bitmap = qp_bitmap;
	nt->qp_bitmap_free = qp_bitmap;

	nt->qp_vec = kcalloc(qp_count, sizeof(*nt->qp_vec), GFP_KERNEL);
	if (!nt->qp_vec) {
		rc = -ENOMEM;
		goto err2;
	}

	for (i = 0; i < qp_count; i++) {
		rc = ntb_transport_init_queue(nt, i);
		if (rc)
			goto err3;
	}

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);

	rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
	if (rc)
		goto err3;

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);
	if (rc)
		goto err4;

	nt->link_is_up = false;
	ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ndev);

	return 0;

err4:
	ntb_clear_ctx(ndev);
err3:
	kfree(nt->qp_vec);
	/* all memory windows are mapped when we get here */
	i = mw_count;
err2:
err1:
	while (i--) {
		mw = &nt->mw_vec[i];
		iounmap(mw->vbase);
	}
	/* unmap before freeing the vector the mappings live in */
	kfree(nt->mw_vec);
err:
	kfree(nt);
	return rc;
}

static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
{
	struct ntb_transport_ctx *nt = ndev->ctx;
	struct ntb_transport_qp *qp;
	u64 qp_bitmap_alloc;
	int i;

	ntb_transport_link_cleanup(nt);
	cancel_work_sync(&nt->link_cleanup);
	cancel_delayed_work_sync(&nt->link_work);

	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

	/* verify that all the qp's are freed */
	for (i = 0; i < nt->qp_count; i++) {
		qp = &nt->qp_vec[i];
		if (qp_bitmap_alloc & BIT_ULL(i))
			ntb_transport_free_queue(qp);
		debugfs_remove_recursive(qp->debugfs_dir);
	}

	ntb_link_disable(ndev);
	ntb_clear_ctx(ndev);

	ntb_bus_remove(nt);

	for (i = nt->mw_count; i--; ) {
		ntb_free_mw(nt, i);
		iounmap(nt->mw_vec[i].vbase);
	}

	kfree(nt->qp_vec);
	kfree(nt->mw_vec);
	kfree(nt);
}

static void ntb_rx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	void *cb_data = entry->cb_data;
	unsigned int len = entry->len;
	struct ntb_payload_header *hdr = entry->rx_hdr;

	hdr->flags = 0;

	iowrite32(entry->index, &qp->rx_info->entry);

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	if (qp->rx_handler && qp->client_ready)
		qp->rx_handler(qp, qp->cb_data, cb_data, len);
}

static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
{
	void *buf = entry->buf;
	size_t len = entry->len;

	memcpy(buf, offset, len);

	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();

	ntb_rx_copy_callback(entry);
}
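/*
 * Note (editorial summary of the logic below): receive completion can be
 * offloaded to a dmaengine memcpy channel.  Payloads smaller than the
 * copy_bytes module parameter (1024 by default), unaligned transfers,
 * and any failure to set up a descriptor fall back to ntb_memcpy_rx()
 * on the CPU, so the DMA path is strictly an optimization.
 */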
static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
			 size_t len)
{
	struct dma_async_tx_descriptor *txd;
	struct ntb_transport_qp *qp = entry->qp;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t pay_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	void *buf = entry->buf;

	entry->len = len;

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err_wait;

	device = chan->device;
	pay_off = (size_t)offset & ~PAGE_MASK;
	buff_off = (size_t)buf & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
		goto err_wait;

	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
	if (!unmap)
		goto err_wait;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
				      pay_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[1]))
		goto err_get_unmap;

	unmap->from_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
					     unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_get_unmap;

	txd->callback = ntb_rx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	qp->last_cookie = cookie;

	qp->rx_async++;

	return;

err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err_wait:
	/* If the callbacks come out of order, the writing of the index to the
	 * last completed will be out of order.  This may result in the
	 * receive stalling forever.
	 */
	dma_sync_wait(chan, qp->last_cookie);
err:
	ntb_memcpy_rx(entry, offset);
	qp->rx_memcpy++;
}
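/*
 * Reference note on the receive ring (a summary of ntb_process_rxc()
 * below, not new protocol): the transmitter stamps each frame's trailing
 * ntb_payload_header with a version equal to its packet count and sets
 * DESC_DONE_FLAG last; the receiver consumes frames in order, clears the
 * flag, and publishes the freed slot through rx_info->entry so the
 * transmitter knows how far it may advance without overwriting
 * unconsumed frames.
 */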
static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;
	int rc;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

	dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
		qp->qp_num, hdr->ver, hdr->len, hdr->flags);

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
		qp->rx_ring_empty++;
		return -EAGAIN;
	}

	if (hdr->flags & LINK_DOWN_FLAG) {
		dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
		ntb_qp_link_down(qp);
		hdr->flags = 0;
		return -EAGAIN;
	}

	if (hdr->ver != (u32)qp->rx_pkts) {
		dev_dbg(&qp->ndev->pdev->dev,
			"version mismatch, expected %llu - got %u\n",
			qp->rx_pkts, hdr->ver);
		qp->rx_err_ver++;
		return -EIO;
	}

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry) {
		dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
		qp->rx_err_no_buf++;

		rc = -ENOMEM;
		goto err;
	}

	if (hdr->len > entry->len) {
		dev_dbg(&qp->ndev->pdev->dev,
			"receive buffer overflow! Wanted %d got %d\n",
			hdr->len, entry->len);
		qp->rx_err_oflow++;

		rc = -EIO;
		goto err;
	}

	dev_dbg(&qp->ndev->pdev->dev,
		"RX OK index %u ver %u size %d into buf size %d\n",
		qp->rx_index, hdr->ver, hdr->len, entry->len);

	qp->rx_bytes += hdr->len;
	qp->rx_pkts++;

	entry->index = qp->rx_index;
	entry->rx_hdr = hdr;

	ntb_async_rx(entry, offset, hdr->len);

	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;

	return 0;

err:
	/* FIXME: if this synchronous update of the rx_index gets ahead of the
	 * asynchronous ntb_rx_copy_callback of a previous entry, there are
	 * three scenarios:
	 *
	 * 1) The peer might miss this update, but observe the update
	 * from the memcpy completion callback.  In this case, the buffer will
	 * not be freed on the peer to be reused for a different packet.  The
	 * successful rx of a later packet would clear the condition, but the
	 * condition could persist if several rx fail in a row.
	 *
	 * 2) The peer may observe this update before the asynchronous copy of
	 * prior packets is completed.  The peer may overwrite the buffers of
	 * the prior packets before they are copied.
	 *
	 * 3) Both: the peer may observe the update, and then observe the index
	 * decrement by the asynchronous completion callback.  Who knows what
	 * badness that will cause.
	 */
	hdr->flags = 0;
	iowrite32(qp->rx_index, &qp->rx_info->entry);

	return rc;
}

static void ntb_transport_rxc_db(unsigned long data)
{
	struct ntb_transport_qp *qp = (void *)data;
	int rc, i;

	dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
		__func__, qp->qp_num);

	/* Limit the number of packets processed in a single interrupt to
	 * provide fairness to others
	 */
	for (i = 0; i < qp->rx_max_entry; i++) {
		rc = ntb_process_rxc(qp);
		if (rc)
			break;
	}

	if (qp->dma_chan)
		dma_async_issue_pending(qp->dma_chan);

	if (i == qp->rx_max_entry) {
		/* there is more work to do */
		tasklet_schedule(&qp->rxc_db_work);
	} else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
		/* the doorbell bit is set: clear it */
		ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
		/* ntb_db_read ensures ntb_db_clear write is committed */
		ntb_db_read(qp->ndev);

		/* an interrupt may have arrived between finishing
		 * ntb_process_rxc and clearing the doorbell bit:
		 * there might be some more work to do.
		 */
		tasklet_schedule(&qp->rxc_db_work);
	}
}
static void ntb_tx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;

	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

	ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
				       entry->len);
	}

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}

static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
	memcpy_toio(offset, entry->buf, entry->len);

	/* Ensure that the data is fully copied out before setting the flags */
	wmb();

	ntb_tx_copy_callback(entry);
}

static void ntb_async_tx(struct ntb_transport_qp *qp,
			 struct ntb_queue_entry *entry)
{
	struct ntb_payload_header __iomem *hdr;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t dest_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_addr_t dest;
	dma_cookie_t cookie;
	void __iomem *offset;
	size_t len = entry->len;
	void *buf = entry->buf;

	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
	entry->tx_hdr = hdr;

	iowrite32(entry->len, &hdr->len);
	iowrite32((u32)qp->tx_pkts, &hdr->ver);

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err;

	device = chan->device;
	dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
	buff_off = (size_t)buf & ~PAGE_MASK;
	dest_off = (size_t)dest & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
		goto err;

	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
	if (!unmap)
		goto err;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_get_unmap;

	txd->callback = ntb_tx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	dma_async_issue_pending(chan);
	qp->tx_async++;

	return;
err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err:
	ntb_memcpy_tx(entry, offset);
	qp->tx_memcpy++;
}

static int ntb_process_tx(struct ntb_transport_qp *qp,
			  struct ntb_queue_entry *entry)
{
	if (qp->tx_index == qp->remote_rx_info->entry) {
		qp->tx_ring_full++;
		return -EAGAIN;
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		if (qp->tx_handler)
			/* report the oversized request to the client */
			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
		return 0;
	}

	ntb_async_tx(qp, entry);

	qp->tx_index++;
	qp->tx_index %= qp->tx_max_entry;

	qp->tx_pkts++;

	return 0;
}
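/*
 * Flow-control example (illustrative, using the three-entry ring from
 * the note above): the transmitter stalls when tx_index catches up with
 * remote_rx_info->entry, the last slot the peer reported consuming.
 * With rx_max_entry = 3 and the peer's index initialized to 2, the
 * transmitter may post entries 0 and 1, then sees tx_index == 2 ==
 * remote_rx_info->entry and returns -EAGAIN until the peer consumes a
 * frame and publishes a new index.
 */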
static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_queue_entry *entry;
	int i, rc;

	if (!qp->link_is_up)
		return;

	dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num);

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry)
			break;
		msleep(100);
	}

	if (!entry)
		return;

	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = LINK_DOWN_FLAG;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
			qp->qp_num);

	ntb_qp_link_down_reset(qp);
}

/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @data: pointer for callback data
 * @client_dev: &struct device pointer of the NTB client device
 * @handlers: pointers to the receive, transmit, and event callback functions
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive.  The receive callback routine will be
 * used to pass up data when the transport has received it on the queue.  The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct device *client_dev,
			   const struct ntb_queue_handlers *handlers)
{
	struct ntb_dev *ndev;
	struct pci_dev *pdev;
	struct ntb_transport_ctx *nt;
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;
	u64 qp_bit;
	unsigned int free_queue;
	int i;

	ndev = dev_ntb(client_dev->parent);
	pdev = ndev->pdev;
	nt = ndev->ctx;

	/* pick the first queue that is not already in use */
	free_queue = ffs(nt->qp_bitmap_free);
	if (!free_queue)
		goto err;

	/* decrement free_queue to make it zero based */
	free_queue--;

	qp = &nt->qp_vec[free_queue];
	qp_bit = BIT_ULL(qp->qp_num);

	nt->qp_bitmap_free &= ~qp_bit;

	qp->cb_data = data;
	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	dmaengine_get();
	qp->dma_chan = dma_find_channel(DMA_MEMCPY);
	if (!qp->dma_chan) {
		dmaengine_put();
		dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
	}

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		if (!entry)
			goto err1;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
			     &qp->rx_free_q);
	}

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		if (!entry)
			goto err2;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
	}

	ntb_db_clear(qp->ndev, qp_bit);
	ntb_db_clear_mask(qp->ndev, qp_bit);

	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);

	return qp;

err2:
	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);
err1:
	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		kfree(entry);
	if (qp->dma_chan)
		dmaengine_put();
	nt->qp_bitmap_free |= qp_bit;
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
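/*
 * Usage sketch (editorial illustration only; the handler and buffer
 * names below are hypothetical and not part of this driver or its API
 * documentation).  A minimal client built on the calls exported here
 * would look roughly like this:
 */
#if 0
static void example_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
			       void *data, int len)
{
	/* consume *data, then recycle the buffer back onto the rx queue;
	 * example_buf and EXAMPLE_BUF_SIZE are placeholders
	 */
	ntb_transport_rx_enqueue(qp, data, data, EXAMPLE_BUF_SIZE);
}

static int example_client_probe(struct device *client_dev)
{
	static const struct ntb_queue_handlers handlers = {
		.rx_handler = example_rx_handler,
	};
	struct ntb_transport_qp *qp;

	qp = ntb_transport_create_queue(NULL, client_dev, &handlers);
	if (!qp)
		return -ENOMEM;

	/* post receive buffers before announcing readiness */
	ntb_transport_rx_enqueue(qp, example_buf, example_buf,
				 EXAMPLE_BUF_SIZE);
	ntb_transport_link_up(qp);
	return 0;
}
#endif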
/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees NTB transport queue
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
	struct ntb_transport_ctx *nt;
	struct pci_dev *pdev;
	struct ntb_queue_entry *entry;
	u64 qp_bit;

	if (!qp)
		return;

	nt = qp->transport;
	pdev = qp->ndev->pdev;

	if (qp->dma_chan) {
		struct dma_chan *chan = qp->dma_chan;
		/* Setting the dma_chan to NULL will force any new traffic to be
		 * processed by the CPU instead of the DMA engine
		 */
		qp->dma_chan = NULL;

		/* Try to be nice and wait for any queued DMA engine
		 * transactions to process before smashing it with a rock
		 */
		dma_sync_wait(chan, qp->last_cookie);
		dmaengine_terminate_all(chan);
		dmaengine_put();
	}

	qp_bit = BIT_ULL(qp->qp_num);

	ntb_db_set_mask(qp->ndev, qp_bit);
	tasklet_disable(&qp->rxc_db_work);

	cancel_delayed_work_sync(&qp->link_work);

	qp->cb_data = NULL;
	qp->rx_handler = NULL;
	qp->tx_handler = NULL;
	qp->event_handler = NULL;

	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		kfree(entry);

	while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
		dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);

	nt->qp_bitmap_free |= qp_bit;

	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);

/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB queue from which to remove the entry
 * @len: pointer to variable to write the enqueued buffer's length to
 *
 * Dequeues unused buffers from receive queue.  Should only be used during
 * shutdown of qp.
 *
 * RETURNS: a pointer to the dequeued buffer, or NULL on error.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
	struct ntb_queue_entry *entry;
	void *buf;

	if (!qp || qp->client_ready)
		return NULL;

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry)
		return NULL;

	buf = entry->cb_data;
	*len = entry->len;

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	return buf;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue into which an NTB
 * payload can be received.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;

	if (!qp)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
	if (!entry)
		return -ENOMEM;

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;

	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);

	return 0;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);

/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	if (!qp || !qp->link_is_up || !len)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (!entry) {
		qp->tx_err_no_buf++;
		return -ENOMEM;
	}

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);

/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
	if (!qp)
		return;

	qp->client_ready = true;

	if (qp->transport->link_is_up)
		schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);
/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified.  It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	int val;

	if (!qp)
		return;

	pdev = qp->ndev->pdev;
	qp->client_ready = false;

	val = ntb_spad_read(qp->ndev, QP_LINKS);

	ntb_peer_spad_write(qp->ndev, QP_LINKS,
			    val & ~BIT(qp->qp_num));

	if (qp->link_is_up)
		ntb_send_link_down(qp);
	else
		cancel_delayed_work_sync(&qp->link_work);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_down);

/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
	if (!qp)
		return false;

	return qp->link_is_up;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);

/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero-based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
	if (!qp)
		return 0;

	return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);

/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
	unsigned int max;

	if (!qp)
		return 0;

	if (!qp->dma_chan)
		return qp->tx_max_frame - sizeof(struct ntb_payload_header);

	/* If DMA engine usage is possible, try to find the max size for that */
	max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
	max -= max % (1 << qp->dma_chan->device->copy_align);

	return max;
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);
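/*
 * Worked example (illustrative numbers): with tx_max_frame = 16414 and a
 * 12-byte struct ntb_payload_header, the CPU path allows payloads up to
 * 16402 bytes.  If a DMA channel with copy_align = 3 (8-byte alignment)
 * is in use, the limit is rounded down to 16402 - (16402 % 8) = 16400.
 */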
static void ntb_transport_doorbell_callback(void *data, int vector)
{
	struct ntb_transport_ctx *nt = data;
	struct ntb_transport_qp *qp;
	u64 db_bits;
	unsigned int qp_num;

	db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
		   ntb_db_vector_mask(nt->ndev, vector));

	while (db_bits) {
		qp_num = __ffs(db_bits);
		qp = &nt->qp_vec[qp_num];

		tasklet_schedule(&qp->rxc_db_work);

		db_bits &= ~BIT_ULL(qp_num);
	}
}

static const struct ntb_ctx_ops ntb_transport_ops = {
	.link_event = ntb_transport_event_callback,
	.db_event = ntb_transport_doorbell_callback,
};

static struct ntb_client ntb_transport_client = {
	.ops = {
		.probe = ntb_transport_probe,
		.remove = ntb_transport_free,
	},
};

static int __init ntb_transport_init(void)
{
	int rc;

	if (debugfs_initialized())
		nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	rc = bus_register(&ntb_transport_bus);
	if (rc)
		goto err_bus;

	rc = ntb_register_client(&ntb_transport_client);
	if (rc)
		goto err_client;

	return 0;

err_client:
	bus_unregister(&ntb_transport_bus);
err_bus:
	debugfs_remove_recursive(nt_debugfs_dir);
	return rc;
}
module_init(ntb_transport_init);

static void __exit ntb_transport_exit(void)
{
	debugfs_remove_recursive(nt_debugfs_dir);

	ntb_unregister_client(&ntb_transport_client);
	bus_unregister(&ntb_transport_bus);
}
module_exit(ntb_transport_exit);