/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Transport Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "linux/ntb.h"
#include "linux/ntb_transport.h"

#define NTB_TRANSPORT_VERSION	4
#define NTB_TRANSPORT_VER	"4"
#define NTB_TRANSPORT_NAME	"ntb_transport"
#define NTB_TRANSPORT_DESC	"Software Queue-Pair Transport over NTB"

MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
MODULE_VERSION(NTB_TRANSPORT_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");

static unsigned int transport_mtu = 0x10000;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");

static bool use_dma;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");

static struct dentry *nt_debugfs_dir;

struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;

	struct ntb_transport_qp *qp;
	union {
		struct ntb_payload_header __iomem *tx_hdr;
		struct ntb_payload_header *rx_hdr;
	};
	unsigned int index;
};

struct ntb_rx_info {
	unsigned int entry;
};

struct ntb_transport_qp {
	struct ntb_transport_ctx *transport;
	struct ntb_dev *ndev;
	void *cb_data;
	struct dma_chan *dma_chan;

	bool client_ready;
	bool link_is_up;

	u8 qp_num;	/* Only 64 QPs are allowed.  0-63 */
	u64 qp_bit;

	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void __iomem *tx_mw;
	dma_addr_t tx_mw_phys;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head rx_post_q;
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	/* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
	spinlock_t ntb_rx_q_lock;
	void *rx_buff;
	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;
	dma_cookie_t last_cookie;
	struct tasklet_struct rxc_db_work;

	void (*event_handler)(void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 rx_memcpy;
	u64 rx_async;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
	u64 tx_err_no_buf;
	u64 tx_memcpy;
	u64 tx_async;
};

struct ntb_transport_mw {
	phys_addr_t phys_addr;
	resource_size_t phys_size;
	resource_size_t xlat_align;
	resource_size_t xlat_align_size;
	void __iomem *vbase;
	size_t xlat_size;
	size_t buff_size;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
	struct list_head entry;
	struct ntb_transport_ctx *nt;
	struct device dev;
};

struct ntb_transport_ctx {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_dev *ndev;

	struct ntb_transport_mw *mw_vec;
	struct ntb_transport_qp *qp_vec;
	unsigned int mw_count;
	unsigned int qp_count;
	u64 qp_bitmap;
	u64 qp_bitmap_free;

	bool link_is_up;
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_node_dir;
};

enum {
	DESC_DONE_FLAG = BIT(0),
	LINK_DOWN_FLAG = BIT(1),
};

struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};
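
/*
 * Illustrative sketch (not functional code): how a queue pair's share of a
 * memory window is laid out.  Each ring slot is *_max_frame bytes and the
 * payload header lives at the *end* of the slot:
 *
 *	offset = rx_buff + rx_max_frame * rx_index;
 *	hdr    = offset + rx_max_frame - sizeof(struct ntb_payload_header);
 *
 *	[ payload ........ | ntb_payload_header ]  slot 0
 *	[ payload ........ | ntb_payload_header ]  slot 1
 *	  ...
 *	[ struct ntb_rx_info ]
 *
 * The trailing struct ntb_rx_info is written by the peer (through its
 * rx_info iomem mapping) and read locally as remote_rx_info to pace the TX
 * ring.  DESC_DONE_FLAG in hdr->flags marks a slot the producer finished
 * writing; LINK_DOWN_FLAG carries link-down notification in-band.
 */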

enum {
	VERSION = 0,
	QP_LINKS,
	NUM_QPS,
	NUM_MWS,
	MW0_SZ_HIGH,
	MW0_SZ_LOW,
	MW1_SZ_HIGH,
	MW1_SZ_LOW,
	MAX_SPAD,
};

#define dev_client_dev(__dev) \
	container_of((__dev), struct ntb_transport_client_dev, dev)

#define drv_client(__drv) \
	container_of((__drv), struct ntb_transport_client, driver)

#define QP_TO_MW(nt, qp)	((qp) % nt->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10

static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client;

static int ntb_transport_bus_match(struct device *dev,
				   struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}
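
/*
 * For example (illustrative): a client device registered as "ntb_netdev0"
 * via ntb_transport_register_client_dev("ntb_netdev") matches a client
 * driver whose drv->name is "ntb_netdev", since only the first
 * strlen(drv->name) characters are compared.
 */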

static int ntb_transport_bus_probe(struct device *dev)
{
	const struct ntb_transport_client *client;
	int rc = -EINVAL;

	get_device(dev);

	client = drv_client(dev->driver);
	rc = client->probe(dev);
	if (rc)
		put_device(dev);

	return rc;
}

static int ntb_transport_bus_remove(struct device *dev)
{
	const struct ntb_transport_client *client;

	client = drv_client(dev->driver);
	client->remove(dev);

	put_device(dev);

	return 0;
}

static struct bus_type ntb_transport_bus = {
	.name = "ntb_transport",
	.match = ntb_transport_bus_match,
	.probe = ntb_transport_bus_probe,
	.remove = ntb_transport_bus_remove,
};

static LIST_HEAD(ntb_transport_list);

static int ntb_bus_init(struct ntb_transport_ctx *nt)
{
	list_add(&nt->entry, &ntb_transport_list);
	return 0;
}

static void ntb_bus_remove(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);
}

static void ntb_transport_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;

	client_dev = dev_client_dev(dev);
	kfree(client_dev);
}

/**
 * ntb_transport_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_transport_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport_ctx *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);

/**
 * ntb_transport_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_transport_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport_ctx *nt;
	int node;
	int rc, i = 0;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	list_for_each_entry(nt, &ntb_transport_list, entry) {
		struct device *dev;

		node = dev_to_node(&nt->ndev->dev);

		client_dev = kzalloc_node(sizeof(*client_dev),
					  GFP_KERNEL, node);
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s%d", device_name, i);
		dev->bus = &ntb_transport_bus;
		dev->release = ntb_transport_client_release;
		dev->parent = &nt->ndev->dev;

		rc = device_register(dev);
		if (rc) {
			kfree(client_dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
		i++;
	}

	return 0;

err:
	ntb_transport_unregister_client_dev(device_name);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);

/**
 * ntb_transport_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_register_client(struct ntb_transport_client *drv)
{
	drv->driver.bus = &ntb_transport_bus;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client);

/**
 * ntb_transport_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 */
void ntb_transport_unregister_client(struct ntb_transport_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);
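
/*
 * A minimal client sketch (illustrative only; the "my_client" names are
 * hypothetical).  A client registers a device name and a driver; the bus
 * core then binds them through ntb_transport_bus_match() above:
 *
 *	static int my_client_probe(struct device *dev)
 *	{
 *		return 0;	(create queues, allocate per-device state)
 *	}
 *
 *	static void my_client_remove(struct device *dev)
 *	{
 *		(tear down queues)
 *	}
 *
 *	static struct ntb_transport_client my_client = {
 *		.driver.name = "my_client",
 *		.driver.owner = THIS_MODULE,
 *		.probe = my_client_probe,
 *		.remove = my_client_remove,
 *	};
 *
 *	rc = ntb_transport_register_client_dev("my_client");
 *	if (!rc)
 *		rc = ntb_transport_register_client(&my_client);
 */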

static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char *buf;
	ssize_t ret, out_offset, out_count;

	qp = filp->private_data;

	if (!qp || !qp->link_is_up)
		return 0;

	out_count = 1000;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "NTB QP stats\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_async - \t%llu\n", qp->rx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff - \t%p\n", qp->rx_buff);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_index - \t%u\n", qp->rx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_max_entry - \t%u\n", qp->rx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_async - \t%llu\n", qp->tx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw - \t%p\n", qp->tx_mw);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_index - \t%u\n", qp->tx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_max_entry - \t%u\n", qp->tx_max_entry);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "qp->remote_rx_info->entry - \t%u\n",
			       qp->remote_rx_info->entry);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "free tx - \t%u\n",
			       ntb_transport_tx_free_entry(qp));

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\nQP Link %s\n",
			       qp->link_is_up ? "Up" : "Down");
	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_read,
};

static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (list_empty(list)) {
		entry = NULL;
		goto out;
	}
	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_del(&entry->entry);

out:
	spin_unlock_irqrestore(lock, flags);

	return entry;
}

static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
					   struct list_head *list,
					   struct list_head *to_list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);

	if (list_empty(list)) {
		entry = NULL;
	} else {
		entry = list_first_entry(list, struct ntb_queue_entry, entry);
		list_move_tail(&entry->entry, to_list);
	}

	spin_unlock_irqrestore(lock, flags);

	return entry;
}
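
/*
 * RX descriptor lifecycle, for reference (a summary of how the helpers
 * above are used, not new mechanism): client buffers posted through
 * ntb_transport_rx_enqueue() travel
 *
 *	rx_free_q --(rx_enqueue)--> rx_pend_q --(ntb_process_rxc, via
 *	ntb_list_mv)--> rx_post_q --(ntb_complete_rxc, once DESC_DONE_FLAG
 *	is set)--> rx_free_q
 *
 * all under ntb_rx_q_lock.  TX entries simply cycle through tx_free_q.
 */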

static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
				     unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
	struct ntb_transport_mw *mw;
	unsigned int rx_size, num_qps_mw;
	unsigned int mw_num, mw_count, qp_count;
	unsigned int i;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	if (!mw->virt_addr)
		return -ENOMEM;

	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
	qp->rx_buff = mw->virt_addr + rx_size * qp_num / mw_count;
	rx_size -= sizeof(struct ntb_rx_info);

	qp->remote_rx_info = qp->rx_buff + rx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;

	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* setup the hdr offsets with 0's */
	for (i = 0; i < qp->rx_max_entry; i++) {
		void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
				sizeof(struct ntb_payload_header));
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
	qp->tx_index = 0;

	return 0;
}

static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;

	if (!mw->virt_addr)
		return;

	ntb_mw_clear_trans(nt->ndev, num_mw);
	dma_free_coherent(&pdev->dev, mw->buff_size,
			  mw->virt_addr, mw->dma_addr);
	mw->xlat_size = 0;
	mw->buff_size = 0;
	mw->virt_addr = NULL;
}

static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
		      resource_size_t size)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;
	size_t xlat_size, buff_size;
	int rc;

	if (!size)
		return -EINVAL;

	xlat_size = round_up(size, mw->xlat_align_size);
	buff_size = round_up(size, mw->xlat_align);

	/* No need to re-setup */
	if (mw->xlat_size == xlat_size)
		return 0;

	if (mw->buff_size)
		ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be aligned */
	mw->xlat_size = xlat_size;
	mw->buff_size = buff_size;

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size,
					   &mw->dma_addr, GFP_KERNEL);
	if (!mw->virt_addr) {
		mw->xlat_size = 0;
		mw->buff_size = 0;
		dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
			buff_size);
		return -ENOMEM;
	}

	/*
	 * we must ensure that the memory address allocated is BAR size
	 * aligned in order for the XLAT register to take the value. This
	 * is a requirement of the hardware. It is recommended to setup CMA
	 * for BAR sizes equal or greater than 4MB.
	 */
	if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
		dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
			&mw->dma_addr);
		ntb_free_mw(nt, num_mw);
		return -ENOMEM;
	}

	/* Notify HW the memory location of the receive buffer */
	rc = ntb_mw_set_trans(nt->ndev, num_mw, mw->dma_addr, mw->xlat_size);
	if (rc) {
		dev_err(&pdev->dev, "Unable to set mw%d translation\n", num_mw);
		ntb_free_mw(nt, num_mw);
		return -EIO;
	}

	return 0;
}
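
/*
 * Sizing sketch for ntb_set_mw() (hypothetical alignment values): if the
 * peer advertises a 5 MiB window and the hardware reports
 * xlat_align == xlat_align_size == 4 MiB, both xlat_size and buff_size
 * become round_up(5 MiB, 4 MiB) = 8 MiB, and dma_alloc_coherent() must
 * return a buffer whose DMA address is 4 MiB-aligned or the window is
 * rejected above -- hence the CMA recommendation for large BARs.
 */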

static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
{
	qp->link_is_up = false;

	qp->tx_index = 0;
	qp->rx_index = 0;
	qp->rx_bytes = 0;
	qp->rx_pkts = 0;
	qp->rx_ring_empty = 0;
	qp->rx_err_no_buf = 0;
	qp->rx_err_oflow = 0;
	qp->rx_err_ver = 0;
	qp->rx_memcpy = 0;
	qp->rx_async = 0;
	qp->tx_bytes = 0;
	qp->tx_pkts = 0;
	qp->tx_ring_full = 0;
	qp->tx_err_no_buf = 0;
	qp->tx_memcpy = 0;
	qp->tx_async = 0;
}

static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
	struct ntb_transport_ctx *nt = qp->transport;
	struct pci_dev *pdev = nt->ndev->pdev;

	dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);

	cancel_delayed_work_sync(&qp->link_work);
	ntb_qp_link_down_reset(qp);

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, qp->link_is_up);
}

static void ntb_qp_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_cleanup);
	struct ntb_transport_ctx *nt = qp->transport;

	ntb_qp_link_cleanup(qp);

	if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	schedule_work(&qp->link_cleanup);
}

static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_qp *qp;
	u64 qp_bitmap_alloc;
	int i;

	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->qp_count; i++)
		if (qp_bitmap_alloc & BIT_ULL(i)) {
			qp = &nt->qp_vec[i];
			ntb_qp_link_cleanup(qp);
			cancel_work_sync(&qp->link_cleanup);
			cancel_delayed_work_sync(&qp->link_work);
		}

	if (!nt->link_is_up)
		cancel_delayed_work_sync(&nt->link_work);

	/* The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < MAX_SPAD; i++)
		ntb_spad_write(nt->ndev, i, 0);
}

static void ntb_transport_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_cleanup);

	ntb_transport_link_cleanup(nt);
}

static void ntb_transport_event_callback(void *data)
{
	struct ntb_transport_ctx *nt = data;

	if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work, 0);
	else
		schedule_work(&nt->link_cleanup);
}

static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_work.work);
	struct ntb_dev *ndev = nt->ndev;
	struct pci_dev *pdev = ndev->pdev;
	resource_size_t size;
	u32 val;
	int rc, i, spad;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < nt->mw_count; i++) {
		size = nt->mw_vec[i].phys_size;

		if (max_mw_size && size > max_mw_size)
			size = max_mw_size;

		spad = MW0_SZ_HIGH + (i * 2);
		ntb_peer_spad_write(ndev, spad, (u32)(size >> 32));

		spad = MW0_SZ_LOW + (i * 2);
		ntb_peer_spad_write(ndev, spad, (u32)size);
	}

	ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count);

	ntb_peer_spad_write(ndev, NUM_QPS, nt->qp_count);

	ntb_peer_spad_write(ndev, VERSION, NTB_TRANSPORT_VERSION);

	/* Query the remote side for its info */
	val = ntb_spad_read(ndev, VERSION);
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);
	if (val != NTB_TRANSPORT_VERSION)
		goto out;

	val = ntb_spad_read(ndev, NUM_QPS);
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
	if (val != nt->qp_count)
		goto out;

	val = ntb_spad_read(ndev, NUM_MWS);
	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
	if (val != nt->mw_count)
		goto out;

	for (i = 0; i < nt->mw_count; i++) {
		u64 val64;

		val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
		val64 = (u64)val << 32;

		val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2));
		val64 |= val;

		dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);

		rc = ntb_set_mw(nt, i, val64);
		if (rc)
			goto out1;
	}

	nt->link_is_up = true;

	for (i = 0; i < nt->qp_count; i++) {
		struct ntb_transport_qp *qp = &nt->qp_vec[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready)
			schedule_delayed_work(&qp->link_work, 0);
	}

	return;

out1:
	for (i = 0; i < nt->mw_count; i++)
		ntb_free_mw(nt, i);
out:
	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
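
/*
 * Scratchpad handshake at a glance (a summary of the exchange above; each
 * side writes its peer's scratchpads and reads its own):
 *
 *	spad[VERSION]     = NTB_TRANSPORT_VERSION
 *	spad[NUM_QPS]     = local qp_count
 *	spad[NUM_MWS]     = local mw_count
 *	spad[MW0_SZ_HIGH] = upper 32 bits of MW0 size
 *	spad[MW0_SZ_LOW]  = lower 32 bits of MW0 size
 *	...
 *
 * The link is declared up only when version, qp count, and mw count all
 * match and every advertised MW size was programmed via ntb_set_mw().
 */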

static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_work.work);
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_transport_ctx *nt = qp->transport;
	int val;

	WARN_ON(!nt->link_is_up);

	val = ntb_spad_read(nt->ndev, QP_LINKS);

	ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num));

	/* query remote spad for qp ready bits */
	ntb_peer_spad_read(nt->ndev, QP_LINKS);
	dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (val & BIT(qp->qp_num)) {
		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		qp->link_is_up = true;

		if (qp->event_handler)
			qp->event_handler(qp->cb_data, qp->link_is_up);

		tasklet_schedule(&qp->rxc_db_work);
	} else if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
				    unsigned int qp_num)
{
	struct ntb_transport_qp *qp;
	struct ntb_transport_mw *mw;
	phys_addr_t mw_base;
	resource_size_t mw_size;
	unsigned int num_qps_mw, tx_size;
	unsigned int mw_num, mw_count, qp_count;
	u64 qp_offset;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	qp = &nt->qp_vec[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ndev = nt->ndev;
	qp->client_ready = false;
	qp->event_handler = NULL;
	ntb_qp_link_down_reset(qp);

	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	mw_base = nt->mw_vec[mw_num].phys_addr;
	mw_size = nt->mw_vec[mw_num].phys_size;

	tx_size = (unsigned int)mw_size / num_qps_mw;
	qp_offset = tx_size * qp_num / mw_count;

	qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
	if (!qp->tx_mw)
		return -EINVAL;

	qp->tx_mw_phys = mw_base + qp_offset;
	if (!qp->tx_mw_phys)
		return -EINVAL;

	tx_size -= sizeof(struct ntb_rx_info);
	qp->rx_info = qp->tx_mw + tx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;

	if (nt->debugfs_node_dir) {
		char debugfs_name[4];

		snprintf(debugfs_name, 4, "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
						     nt->debugfs_node_dir);

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
							qp->debugfs_dir, qp,
							&ntb_qp_debugfs_stats);
	} else {
		qp->debugfs_dir = NULL;
		qp->debugfs_stats = NULL;
	}

	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);

	spin_lock_init(&qp->ntb_rx_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_post_q);
	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);

	tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
		     (unsigned long)qp);

	return 0;
}

static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
{
	struct ntb_transport_ctx *nt;
	struct ntb_transport_mw *mw;
	unsigned int mw_count, qp_count;
	u64 qp_bitmap;
	int node;
	int rc, i;

	if (ntb_db_is_unsafe(ndev))
		dev_dbg(&ndev->dev,
			"doorbell is unsafe, proceed anyway...\n");
	if (ntb_spad_is_unsafe(ndev))
		dev_dbg(&ndev->dev,
			"scratchpad is unsafe, proceed anyway...\n");

	node = dev_to_node(&ndev->dev);

	nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
	if (!nt)
		return -ENOMEM;

	nt->ndev = ndev;

	mw_count = ntb_mw_count(ndev);

	nt->mw_count = mw_count;

	nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
				  GFP_KERNEL, node);
	if (!nt->mw_vec) {
		rc = -ENOMEM;
		goto err;
	}

	for (i = 0; i < mw_count; i++) {
		mw = &nt->mw_vec[i];

		rc = ntb_mw_get_range(ndev, i, &mw->phys_addr, &mw->phys_size,
				      &mw->xlat_align, &mw->xlat_align_size);
		if (rc)
			goto err1;

		mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
		if (!mw->vbase) {
			rc = -ENOMEM;
			goto err1;
		}

		mw->buff_size = 0;
		mw->xlat_size = 0;
		mw->virt_addr = NULL;
		mw->dma_addr = 0;
	}

	qp_bitmap = ntb_db_valid_mask(ndev);

	qp_count = ilog2(qp_bitmap);
	if (max_num_clients && max_num_clients < qp_count)
		qp_count = max_num_clients;
	else if (mw_count < qp_count)
		qp_count = mw_count;

	qp_bitmap &= BIT_ULL(qp_count) - 1;

	nt->qp_count = qp_count;
	nt->qp_bitmap = qp_bitmap;
	nt->qp_bitmap_free = qp_bitmap;

	nt->qp_vec = kzalloc_node(qp_count * sizeof(*nt->qp_vec),
				  GFP_KERNEL, node);
	if (!nt->qp_vec) {
		rc = -ENOMEM;
		goto err2;
	}

	if (nt_debugfs_dir) {
		nt->debugfs_node_dir =
			debugfs_create_dir(pci_name(ndev->pdev),
					   nt_debugfs_dir);
	}

	for (i = 0; i < qp_count; i++) {
		rc = ntb_transport_init_queue(nt, i);
		if (rc)
			goto err3;
	}

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);

	rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
	if (rc)
		goto err3;

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);
	if (rc)
		goto err4;

	nt->link_is_up = false;
	ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ndev);

	return 0;

err4:
	ntb_clear_ctx(ndev);
err3:
	kfree(nt->qp_vec);
err2:
err1:
	while (i--) {
		mw = &nt->mw_vec[i];
		iounmap(mw->vbase);
	}
	kfree(nt->mw_vec);
err:
	kfree(nt);
	return rc;
}
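
/*
 * Worked sizing example for the probe above (hypothetical hardware): with
 * a doorbell valid mask of 0xffff, qp_count starts as ilog2(0xffff) = 15.
 * If the device exposes two memory windows and max_num_clients is 0,
 * qp_count is clamped to mw_count = 2 and qp_bitmap becomes
 * BIT_ULL(2) - 1 = 0x3: one doorbell bit per queue pair.
 */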

static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
{
	struct ntb_transport_ctx *nt = ndev->ctx;
	struct ntb_transport_qp *qp;
	u64 qp_bitmap_alloc;
	int i;

	ntb_transport_link_cleanup(nt);
	cancel_work_sync(&nt->link_cleanup);
	cancel_delayed_work_sync(&nt->link_work);

	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

	/* verify that all the qp's are freed */
	for (i = 0; i < nt->qp_count; i++) {
		qp = &nt->qp_vec[i];
		if (qp_bitmap_alloc & BIT_ULL(i))
			ntb_transport_free_queue(qp);
		debugfs_remove_recursive(qp->debugfs_dir);
	}

	ntb_link_disable(ndev);
	ntb_clear_ctx(ndev);

	ntb_bus_remove(nt);

	for (i = nt->mw_count; i--; ) {
		ntb_free_mw(nt, i);
		iounmap(nt->mw_vec[i].vbase);
	}

	kfree(nt->qp_vec);
	kfree(nt->mw_vec);
	kfree(nt);
}

static void ntb_complete_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_queue_entry *entry;
	void *cb_data;
	unsigned int len;
	unsigned long irqflags;

	spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);

	while (!list_empty(&qp->rx_post_q)) {
		entry = list_first_entry(&qp->rx_post_q,
					 struct ntb_queue_entry, entry);
		if (!(entry->flags & DESC_DONE_FLAG))
			break;

		entry->rx_hdr->flags = 0;
		iowrite32(entry->index, &qp->rx_info->entry);

		cb_data = entry->cb_data;
		len = entry->len;

		list_move_tail(&entry->entry, &qp->rx_free_q);

		spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);

		if (qp->rx_handler && qp->client_ready)
			qp->rx_handler(qp, qp->cb_data, cb_data, len);

		spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
	}

	spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
}

static void ntb_rx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;

	entry->flags |= DESC_DONE_FLAG;

	ntb_complete_rxc(entry->qp);
}

static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
{
	void *buf = entry->buf;
	size_t len = entry->len;

	memcpy(buf, offset, len);

	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();

	ntb_rx_copy_callback(entry);
}

static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
{
	struct dma_async_tx_descriptor *txd;
	struct ntb_transport_qp *qp = entry->qp;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t pay_off, buff_off, len;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	void *buf = entry->buf;

	len = entry->len;

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err_wait;

	device = chan->device;
	pay_off = (size_t)offset & ~PAGE_MASK;
	buff_off = (size_t)buf & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
		goto err_wait;

	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
	if (!unmap)
		goto err_wait;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
				      pay_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[1]))
		goto err_get_unmap;

	unmap->from_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
					     unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_get_unmap;

	txd->callback = ntb_rx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	qp->last_cookie = cookie;

	qp->rx_async++;

	return;

err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err_wait:
	/* If the callbacks come out of order, the writing of the index to the
	 * last completed will be out of order.  This may result in the
	 * receive stalling forever.
	 */
	dma_sync_wait(chan, qp->last_cookie);
err:
	ntb_memcpy_rx(entry, offset);
	qp->rx_memcpy++;
}

static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

	dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
		qp->qp_num, hdr->ver, hdr->len, hdr->flags);

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
		qp->rx_ring_empty++;
		return -EAGAIN;
	}

	if (hdr->flags & LINK_DOWN_FLAG) {
		dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
		ntb_qp_link_down(qp);
		hdr->flags = 0;
		return -EAGAIN;
	}

	if (hdr->ver != (u32)qp->rx_pkts) {
		dev_dbg(&qp->ndev->pdev->dev,
			"version mismatch, expected %llu - got %u\n",
			qp->rx_pkts, hdr->ver);
		qp->rx_err_ver++;
		return -EIO;
	}

	entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
	if (!entry) {
		dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
		qp->rx_err_no_buf++;
		return -EAGAIN;
	}

	entry->rx_hdr = hdr;
	entry->index = qp->rx_index;

	if (hdr->len > entry->len) {
		dev_dbg(&qp->ndev->pdev->dev,
			"receive buffer overflow! Wanted %d got %d\n",
			hdr->len, entry->len);
		qp->rx_err_oflow++;

		entry->len = -EIO;
		entry->flags |= DESC_DONE_FLAG;

		ntb_complete_rxc(qp);
	} else {
		dev_dbg(&qp->ndev->pdev->dev,
			"RX OK index %u ver %u size %d into buf size %d\n",
			qp->rx_index, hdr->ver, hdr->len, entry->len);

		qp->rx_bytes += hdr->len;
		qp->rx_pkts++;

		entry->len = hdr->len;

		ntb_async_rx(entry, offset);
	}

	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;

	return 0;
}
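
/*
 * Note on hdr->ver: despite the name it is a per-frame sequence number,
 * not a protocol version -- the producer writes the low 32 bits of its
 * packet count (see ntb_async_tx() below, which does
 * iowrite32((u32)qp->tx_pkts, &hdr->ver)).  A mismatch with the consumer's
 * rx_pkts therefore flags a lost or reordered frame; e.g. if rx_pkts is 7,
 * the next frame must carry ver == 7.
 */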

static void ntb_transport_rxc_db(unsigned long data)
{
	struct ntb_transport_qp *qp = (void *)data;
	int rc, i;

	dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
		__func__, qp->qp_num);

	/* Limit the number of packets processed in a single interrupt to
	 * provide fairness to others
	 */
	for (i = 0; i < qp->rx_max_entry; i++) {
		rc = ntb_process_rxc(qp);
		if (rc)
			break;
	}

	if (i && qp->dma_chan)
		dma_async_issue_pending(qp->dma_chan);

	if (i == qp->rx_max_entry) {
		/* there is more work to do */
		tasklet_schedule(&qp->rxc_db_work);
	} else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
		/* the doorbell bit is set: clear it */
		ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
		/* ntb_db_read ensures ntb_db_clear write is committed */
		ntb_db_read(qp->ndev);

		/* an interrupt may have arrived between finishing
		 * ntb_process_rxc and clearing the doorbell bit:
		 * there might be some more work to do.
		 */
		tasklet_schedule(&qp->rxc_db_work);
	}
}

static void ntb_tx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;

	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

	ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
				       entry->len);
	}

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}

static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
#ifdef ARCH_HAS_NOCACHE_UACCESS
	/*
	 * Using non-temporal mov to improve performance on non-cached
	 * writes, even though we aren't actually copying from user space.
	 */
	__copy_from_user_inatomic_nocache(offset, entry->buf, entry->len);
#else
	memcpy_toio(offset, entry->buf, entry->len);
#endif

	/* Ensure that the data is fully copied out before setting the flags */
	wmb();

	ntb_tx_copy_callback(entry);
}

static void ntb_async_tx(struct ntb_transport_qp *qp,
			 struct ntb_queue_entry *entry)
{
	struct ntb_payload_header __iomem *hdr;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t dest_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_addr_t dest;
	dma_cookie_t cookie;
	void __iomem *offset;
	size_t len = entry->len;
	void *buf = entry->buf;

	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
	entry->tx_hdr = hdr;

	iowrite32(entry->len, &hdr->len);
	iowrite32((u32)qp->tx_pkts, &hdr->ver);

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err;

	device = chan->device;
	dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
	buff_off = (size_t)buf & ~PAGE_MASK;
	dest_off = (size_t)dest & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
		goto err;

	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
	if (!unmap)
		goto err;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_get_unmap;

	txd->callback = ntb_tx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	dma_async_issue_pending(chan);
	qp->tx_async++;

	return;
err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err:
	ntb_memcpy_tx(entry, offset);
	qp->tx_memcpy++;
}
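
/*
 * Copy-path selection, summarized: ntb_async_tx() (and ntb_async_rx()
 * above) falls back to a synchronous CPU copy whenever no DMA channel was
 * acquired, the payload is shorter than the copy_bytes module parameter
 * (default 1024), the buffers fail is_dma_copy_aligned(), or descriptor
 * setup/submission fails.  Only payloads that clear all of these gates are
 * handed to the DMA engine and counted in tx_async/rx_async.
 */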

static int ntb_process_tx(struct ntb_transport_qp *qp,
			  struct ntb_queue_entry *entry)
{
	if (qp->tx_index == qp->remote_rx_info->entry) {
		qp->tx_ring_full++;
		return -EAGAIN;
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
		return 0;
	}

	ntb_async_tx(qp, entry);

	qp->tx_index++;
	qp->tx_index %= qp->tx_max_entry;

	qp->tx_pkts++;

	return 0;
}
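
/*
 * Ring-full test, worked example: the producer may fill slots only until
 * tx_index would equal remote_rx_info->entry, the consumer index the peer
 * publishes back (see ntb_complete_rxc()).  One slot is thus always
 * sacrificed to distinguish a full ring from an empty one; with
 * tx_max_entry == 4 at most 3 frames can be in flight.
 */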

static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_queue_entry *entry;
	int i, rc;

	if (!qp->link_is_up)
		return;

	dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num);

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry)
			break;
		msleep(100);
	}

	if (!entry)
		return;

	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = LINK_DOWN_FLAG;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
			qp->qp_num);

	ntb_qp_link_down_reset(qp);
}

static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
{
	return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
}

/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @data: pointer for callback data
 * @client_dev: &struct device pointer of the NTB client device
 * @handlers: pointer to the struct holding the rx, tx, and event handlers
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive.  The receive callback routine will be
 * used to pass up data when the transport has received it on the queue.  The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct device *client_dev,
			   const struct ntb_queue_handlers *handlers)
{
	struct ntb_dev *ndev;
	struct pci_dev *pdev;
	struct ntb_transport_ctx *nt;
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;
	u64 qp_bit;
	unsigned int free_queue;
	dma_cap_mask_t dma_mask;
	int node;
	int i;

	ndev = dev_ntb(client_dev->parent);
	pdev = ndev->pdev;
	nt = ndev->ctx;

	node = dev_to_node(&ndev->dev);

	free_queue = ffs(nt->qp_bitmap_free);
	if (!free_queue)
		goto err;

	/* decrement free_queue to make it zero based */
	free_queue--;

	qp = &nt->qp_vec[free_queue];
	qp_bit = BIT_ULL(qp->qp_num);

	nt->qp_bitmap_free &= ~qp_bit;

	qp->cb_data = data;
	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	dma_cap_zero(dma_mask);
	dma_cap_set(DMA_MEMCPY, dma_mask);

	if (use_dma) {
		qp->dma_chan = dma_request_channel(dma_mask, ntb_dma_filter_fn,
						   (void *)(unsigned long)node);
		if (!qp->dma_chan)
			dev_info(&pdev->dev, "Unable to allocate DMA channel\n");
	} else {
		qp->dma_chan = NULL;
	}
	dev_dbg(&pdev->dev, "Using %s memcpy\n", qp->dma_chan ? "DMA" : "CPU");

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
		if (!entry)
			goto err1;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
			     &qp->rx_free_q);
	}

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
		if (!entry)
			goto err2;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
	}

	ntb_db_clear(qp->ndev, qp_bit);
	ntb_db_clear_mask(qp->ndev, qp_bit);

	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);

	return qp;

err2:
	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);
err1:
	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
		kfree(entry);
	if (qp->dma_chan)
		dma_release_channel(qp->dma_chan);
	nt->qp_bitmap_free |= qp_bit;
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
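
/*
 * End-to-end usage sketch from a client's probe() (illustrative; the
 * my_* handlers, priv, bufs[], and NUM_BUFS are hypothetical):
 *
 *	static const struct ntb_queue_handlers my_handlers = {
 *		.rx_handler = my_rx_handler,
 *		.tx_handler = my_tx_handler,
 *		.event_handler = my_event_handler,
 *	};
 *
 *	qp = ntb_transport_create_queue(priv, client_dev, &my_handlers);
 *	if (!qp)
 *		return -EIO;
 *
 *	for (i = 0; i < NUM_BUFS; i++)	(post initial receive buffers)
 *		ntb_transport_rx_enqueue(qp, bufs[i], bufs[i]->data, buf_len);
 *	ntb_transport_link_up(qp);	(declare client readiness)
 *
 *	Once my_event_handler reports link up, transmit with
 *	ntb_transport_tx_enqueue(qp, buf, buf->data, buf->len).
 */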

/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees NTB transport queue
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	struct ntb_queue_entry *entry;
	u64 qp_bit;

	if (!qp)
		return;

	pdev = qp->ndev->pdev;

	if (qp->dma_chan) {
		struct dma_chan *chan = qp->dma_chan;
		/* Putting the dma_chan to NULL will force any new traffic to be
		 * processed by the CPU instead of the DMA engine
		 */
		qp->dma_chan = NULL;

		/* Try to be nice and wait for any queued DMA engine
		 * transactions to process before smashing it with a rock
		 */
		dma_sync_wait(chan, qp->last_cookie);
		dmaengine_terminate_all(chan);
		dma_release_channel(chan);
	}

	qp_bit = BIT_ULL(qp->qp_num);

	ntb_db_set_mask(qp->ndev, qp_bit);
	tasklet_disable(&qp->rxc_db_work);

	cancel_delayed_work_sync(&qp->link_work);

	qp->cb_data = NULL;
	qp->rx_handler = NULL;
	qp->tx_handler = NULL;
	qp->event_handler = NULL;

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
		kfree(entry);

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
		dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
		dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);

	qp->transport->qp_bitmap_free |= qp_bit;

	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);

/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB transport layer queue from which to dequeue
 * @len: pointer to variable to write enqueued buffer's length
 *
 * Dequeues unused buffers from receive queue.  Should only be used during
 * shutdown of qp.
 *
 * RETURNS: the pointer associated with the dequeued buffer (the @cb argument
 * of ntb_transport_rx_enqueue()), or NULL on error.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
	struct ntb_queue_entry *entry;
	void *buf;

	if (!qp || qp->client_ready)
		return NULL;

	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
	if (!entry)
		return NULL;

	buf = entry->cb_data;
	*len = entry->len;

	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);

	return buf;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);

/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue into which an NTB
 * payload can be received.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;

	if (!qp)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
	if (!entry)
		return -ENOMEM;

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);

	tasklet_schedule(&qp->rxc_db_work);

	return 0;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);

/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	if (!qp || !qp->link_is_up || !len)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (!entry) {
		qp->tx_err_no_buf++;
		return -EBUSY;
	}

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);

/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
	if (!qp)
		return;

	qp->client_ready = true;

	if (qp->transport->link_is_up)
		schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);

/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified.  It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	int val;

	if (!qp)
		return;

	pdev = qp->ndev->pdev;
	qp->client_ready = false;

	val = ntb_spad_read(qp->ndev, QP_LINKS);

	ntb_peer_spad_write(qp->ndev, QP_LINKS,
			    val & ~BIT(qp->qp_num));

	if (qp->link_is_up)
		ntb_send_link_down(qp);
	else
		cancel_delayed_work_sync(&qp->link_work);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_down);

/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
	if (!qp)
		return false;

	return qp->link_is_up;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);

/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
	if (!qp)
		return 0;

	return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);

/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
	unsigned int max;

	if (!qp)
		return 0;

	if (!qp->dma_chan)
		return qp->tx_max_frame - sizeof(struct ntb_payload_header);

	/* If DMA engine usage is possible, try to find the max size for that */
	max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
	max -= max % (1 << qp->dma_chan->device->copy_align);

	return max;
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);

unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
{
	unsigned int head = qp->tx_index;
	unsigned int tail = qp->remote_rx_info->entry;

	return tail > head ? tail - head : qp->tx_max_entry + tail - head;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);
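
/*
 * Free-entry arithmetic, worked example: with tx_max_entry == 8,
 * head (tx_index) == 6 and tail (remote_rx_info->entry) == 2, the count
 * wraps: 8 + 2 - 6 = 4 free slots.  With head == 2 and tail == 6 it is
 * simply 6 - 2 = 4.
 */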

static void ntb_transport_doorbell_callback(void *data, int vector)
{
	struct ntb_transport_ctx *nt = data;
	struct ntb_transport_qp *qp;
	u64 db_bits;
	unsigned int qp_num;

	db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
		   ntb_db_vector_mask(nt->ndev, vector));

	while (db_bits) {
		qp_num = __ffs(db_bits);
		qp = &nt->qp_vec[qp_num];

		tasklet_schedule(&qp->rxc_db_work);

		db_bits &= ~BIT_ULL(qp_num);
	}
}

static const struct ntb_ctx_ops ntb_transport_ops = {
	.link_event = ntb_transport_event_callback,
	.db_event = ntb_transport_doorbell_callback,
};

static struct ntb_client ntb_transport_client = {
	.ops = {
		.probe = ntb_transport_probe,
		.remove = ntb_transport_free,
	},
};

static int __init ntb_transport_init(void)
{
	int rc;

	pr_info("%s, version %s\n", NTB_TRANSPORT_DESC, NTB_TRANSPORT_VER);

	if (debugfs_initialized())
		nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	rc = bus_register(&ntb_transport_bus);
	if (rc)
		goto err_bus;

	rc = ntb_register_client(&ntb_transport_client);
	if (rc)
		goto err_client;

	return 0;

err_client:
	bus_unregister(&ntb_transport_bus);
err_bus:
	debugfs_remove_recursive(nt_debugfs_dir);
	return rc;
}
module_init(ntb_transport_init);

static void __exit ntb_transport_exit(void)
{
	debugfs_remove_recursive(nt_debugfs_dir);

	ntb_unregister_client(&ntb_transport_client);
	bus_unregister(&ntb_transport_bus);
}
module_exit(ntb_transport_exit);