/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copy
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Transport Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "linux/ntb.h"
#include "linux/ntb_transport.h"

#define NTB_TRANSPORT_VERSION	4
#define NTB_TRANSPORT_VER	"4"
#define NTB_TRANSPORT_NAME	"ntb_transport"
#define NTB_TRANSPORT_DESC	"Software Queue-Pair Transport over NTB"

MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
MODULE_VERSION(NTB_TRANSPORT_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");

static unsigned int transport_mtu = 0x10000;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");

static bool use_dma;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");

static struct dentry *nt_debugfs_dir;

struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;

	struct ntb_transport_qp *qp;
	union {
		struct ntb_payload_header __iomem *tx_hdr;
		struct ntb_payload_header *rx_hdr;
	};
	unsigned int index;
};

struct ntb_rx_info {
	unsigned int entry;
};

struct ntb_transport_qp {
	struct ntb_transport_ctx *transport;
	struct ntb_dev *ndev;
	void *cb_data;
	struct dma_chan *tx_dma_chan;
	struct dma_chan *rx_dma_chan;

	bool client_ready;
	bool link_is_up;
	bool active;

	u8 qp_num;	/* Only 64 QP's are allowed.  0-63 */
	u64 qp_bit;

	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void __iomem *tx_mw;
	dma_addr_t tx_mw_phys;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head rx_post_q;
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	/* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
	spinlock_t ntb_rx_q_lock;
	void *rx_buff;
	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;
	dma_cookie_t last_cookie;
	struct tasklet_struct rxc_db_work;

	void (*event_handler)(void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 rx_memcpy;
	u64 rx_async;
	u64 dma_rx_prep_err;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
	u64 tx_err_no_buf;
	u64 tx_memcpy;
	u64 tx_async;
	u64 dma_tx_prep_err;
};

struct ntb_transport_mw {
	phys_addr_t phys_addr;
	resource_size_t phys_size;
	resource_size_t xlat_align;
	resource_size_t xlat_align_size;
	void __iomem *vbase;
	size_t xlat_size;
	size_t buff_size;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
	struct list_head entry;
	struct ntb_transport_ctx *nt;
	struct device dev;
};

struct ntb_transport_ctx {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_dev *ndev;

	struct ntb_transport_mw *mw_vec;
	struct ntb_transport_qp *qp_vec;
	unsigned int mw_count;
	unsigned int qp_count;
	u64 qp_bitmap;
	u64 qp_bitmap_free;

	bool link_is_up;
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_node_dir;
};

enum {
	DESC_DONE_FLAG = BIT(0),
	LINK_DOWN_FLAG = BIT(1),
};

struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};

enum {
	VERSION = 0,
	QP_LINKS,
	NUM_QPS,
	NUM_MWS,
	MW0_SZ_HIGH,
	MW0_SZ_LOW,
	MW1_SZ_HIGH,
	MW1_SZ_LOW,
	MAX_SPAD,
};

#define dev_client_dev(__dev) \
	container_of((__dev), struct ntb_transport_client_dev, dev)

#define drv_client(__drv) \
	container_of((__drv), struct ntb_transport_client, driver)

#define QP_TO_MW(nt, qp)	((qp) % nt->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10
#define DMA_RETRIES		20
#define DMA_OUT_RESOURCE_TO	50
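
/*
 * Illustrative sketch (not compiled): how the scratchpad enum above and
 * QP_TO_MW() are meant to be read.  The size of memory window i is exchanged
 * as a 32-bit high/low pair in scratchpads MW0_SZ_HIGH + i * 2 and
 * MW0_SZ_LOW + i * 2, and queue pairs are striped round-robin across the
 * available memory windows.  The helper name below is hypothetical and
 * exists only for this example.
 */
#if 0	/* example only */
static void example_spad_and_mw_mapping(struct ntb_transport_ctx *nt)
{
	unsigned int i, qp_num;

	for (i = 0; i < nt->mw_count; i++)
		pr_info("mw%u size is exchanged via spads %u and %u\n",
			i, MW0_SZ_HIGH + i * 2, MW0_SZ_LOW + i * 2);

	for (qp_num = 0; qp_num < nt->qp_count; qp_num++)
		pr_info("qp%u is carved out of mw%u\n",
			qp_num, QP_TO_MW(nt, qp_num));
}
#endif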

static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client;

static int ntb_transport_bus_match(struct device *dev,
				   struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_transport_bus_probe(struct device *dev)
{
	const struct ntb_transport_client *client;
	int rc = -EINVAL;

	get_device(dev);

	client = drv_client(dev->driver);
	rc = client->probe(dev);
	if (rc)
		put_device(dev);

	return rc;
}

static int ntb_transport_bus_remove(struct device *dev)
{
	const struct ntb_transport_client *client;

	client = drv_client(dev->driver);
	client->remove(dev);

	put_device(dev);

	return 0;
}

static struct bus_type ntb_transport_bus = {
	.name = "ntb_transport",
	.match = ntb_transport_bus_match,
	.probe = ntb_transport_bus_probe,
	.remove = ntb_transport_bus_remove,
};

static LIST_HEAD(ntb_transport_list);

static int ntb_bus_init(struct ntb_transport_ctx *nt)
{
	list_add_tail(&nt->entry, &ntb_transport_list);
	return 0;
}

static void ntb_bus_remove(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);
}

static void ntb_transport_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;

	client_dev = dev_client_dev(dev);
	kfree(client_dev);
}

/**
 * ntb_transport_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_transport_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport_ctx *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);

/**
 * ntb_transport_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_transport_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport_ctx *nt;
	int node;
	int rc, i = 0;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	list_for_each_entry(nt, &ntb_transport_list, entry) {
		struct device *dev;

		node = dev_to_node(&nt->ndev->dev);

		client_dev = kzalloc_node(sizeof(*client_dev),
					  GFP_KERNEL, node);
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s%d", device_name, i);
		dev->bus = &ntb_transport_bus;
		dev->release = ntb_transport_client_release;
		dev->parent = &nt->ndev->dev;

		rc = device_register(dev);
		if (rc) {
			kfree(client_dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
		i++;
	}

	return 0;

err:
	ntb_transport_unregister_client_dev(device_name);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);

/**
 * ntb_transport_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_register_client(struct ntb_transport_client *drv)
{
	drv->driver.bus = &ntb_transport_bus;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client);

/**
 * ntb_transport_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 */
void ntb_transport_unregister_client(struct ntb_transport_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);
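
/*
 * Illustrative sketch (not compiled): the shape of a minimal transport
 * client registered through the two calls above.  The "example_client"
 * names are hypothetical; the registration order (client device first,
 * then client driver) is what the bus match-by-name above expects.
 */
#if 0	/* example only */
static int example_client_probe(struct device *client_dev)
{
	/* create queue pairs, allocate private data, etc. */
	return 0;
}

static void example_client_remove(struct device *client_dev)
{
	/* tear down whatever probe set up */
}

static struct ntb_transport_client example_client = {
	.driver.name = "example_client",
	.driver.owner = THIS_MODULE,
	.probe = example_client_probe,
	.remove = example_client_remove,
};

static int __init example_client_init(void)
{
	int rc;

	/* creates an "example_client0" device on each transport instance */
	rc = ntb_transport_register_client_dev("example_client");
	if (rc)
		return rc;

	rc = ntb_transport_register_client(&example_client);
	if (rc)
		ntb_transport_unregister_client_dev("example_client");

	return rc;
}
module_init(example_client_init);
#endif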

static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char *buf;
	ssize_t ret, out_offset, out_count;

	qp = filp->private_data;

	if (!qp || !qp->link_is_up)
		return 0;

	out_count = 1000;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\nNTB QP stats:\n\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_async - \t%llu\n", qp->rx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff - \t0x%p\n", qp->rx_buff);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_index - \t%u\n", qp->rx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_max_entry - \t%u\n\n", qp->rx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_async - \t%llu\n", qp->tx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw - \t0x%p\n", qp->tx_mw);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_index (H) - \t%u\n", qp->tx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "RRI (T) - \t%u\n",
			       qp->remote_rx_info->entry);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_max_entry - \t%u\n", qp->tx_max_entry);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "free tx - \t%u\n",
			       ntb_transport_tx_free_entry(qp));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "DMA tx prep err - \t%llu\n",
			       qp->dma_tx_prep_err);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "DMA rx prep err - \t%llu\n",
			       qp->dma_rx_prep_err);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Using TX DMA - \t%s\n",
			       qp->tx_dma_chan ? "Yes" : "No");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Using RX DMA - \t%s\n",
			       qp->rx_dma_chan ? "Yes" : "No");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "QP Link - \t%s\n",
			       qp->link_is_up ? "Up" : "Down");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\n");

	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_read,
};

static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (list_empty(list)) {
		entry = NULL;
		goto out;
	}
	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_del(&entry->entry);

out:
	spin_unlock_irqrestore(lock, flags);

	return entry;
}

static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
					   struct list_head *list,
					   struct list_head *to_list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);

	if (list_empty(list)) {
		entry = NULL;
	} else {
		entry = list_first_entry(list, struct ntb_queue_entry, entry);
		list_move_tail(&entry->entry, to_list);
	}

	spin_unlock_irqrestore(lock, flags);

	return entry;
}

static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
				     unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
	struct ntb_transport_mw *mw;
	unsigned int rx_size, num_qps_mw;
	unsigned int mw_num, mw_count, qp_count;
	unsigned int i;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	if (!mw->virt_addr)
		return -ENOMEM;

	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
	qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
	rx_size -= sizeof(struct ntb_rx_info);

	qp->remote_rx_info = qp->rx_buff + rx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;

	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* setup the hdr offsets with 0's */
	for (i = 0; i < qp->rx_max_entry; i++) {
		void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
				sizeof(struct ntb_payload_header));
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
	qp->tx_index = 0;

	return 0;
}
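
/*
 * Worked example of the carve-up done above, under assumed (hypothetical)
 * numbers: one 1 MiB memory window shared by two queue pairs, with the
 * default transport_mtu of 0x10000 (64 KiB):
 *
 *   rx_size      = 1 MiB / 2 = 524288 bytes per QP,
 *                  minus sizeof(struct ntb_rx_info) for the ring-index word
 *   rx_max_frame = min(65536, rx_size / 2) = 65536 bytes
 *   rx_max_entry = rx_size / rx_max_frame  = 7 frames in the ring
 *
 * Each frame carries its payload first and a struct ntb_payload_header in
 * its last bytes, which is why the loop above zeroes the header at the
 * *end* of every frame:
 *
 *   | payload ... | hdr | payload ... | hdr | ... | ntb_rx_info |
 */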

static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;

	if (!mw->virt_addr)
		return;

	ntb_mw_clear_trans(nt->ndev, num_mw);
	dma_free_coherent(&pdev->dev, mw->buff_size,
			  mw->virt_addr, mw->dma_addr);
	mw->xlat_size = 0;
	mw->buff_size = 0;
	mw->virt_addr = NULL;
}

static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
		      resource_size_t size)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;
	size_t xlat_size, buff_size;
	int rc;

	if (!size)
		return -EINVAL;

	xlat_size = round_up(size, mw->xlat_align_size);
	buff_size = round_up(size, mw->xlat_align);

	/* No need to re-setup */
	if (mw->xlat_size == xlat_size)
		return 0;

	if (mw->buff_size)
		ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be aligned */
	mw->xlat_size = xlat_size;
	mw->buff_size = buff_size;

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size,
					   &mw->dma_addr, GFP_KERNEL);
	if (!mw->virt_addr) {
		mw->xlat_size = 0;
		mw->buff_size = 0;
		dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
			buff_size);
		return -ENOMEM;
	}

	/*
	 * we must ensure that the memory address allocated is BAR size
	 * aligned in order for the XLAT register to take the value. This
	 * is a requirement of the hardware. It is recommended to setup CMA
	 * for BAR sizes equal or greater than 4MB.
	 */
	if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
		dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
			&mw->dma_addr);
		ntb_free_mw(nt, num_mw);
		return -ENOMEM;
	}

	/* Notify HW the memory location of the receive buffer */
	rc = ntb_mw_set_trans(nt->ndev, num_mw, mw->dma_addr, mw->xlat_size);
	if (rc) {
		dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw);
		ntb_free_mw(nt, num_mw);
		return -EIO;
	}

	return 0;
}

static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
{
	qp->link_is_up = false;
	qp->active = false;

	qp->tx_index = 0;
	qp->rx_index = 0;
	qp->rx_bytes = 0;
	qp->rx_pkts = 0;
	qp->rx_ring_empty = 0;
	qp->rx_err_no_buf = 0;
	qp->rx_err_oflow = 0;
	qp->rx_err_ver = 0;
	qp->rx_memcpy = 0;
	qp->rx_async = 0;
	qp->tx_bytes = 0;
	qp->tx_pkts = 0;
	qp->tx_ring_full = 0;
	qp->tx_err_no_buf = 0;
	qp->tx_memcpy = 0;
	qp->tx_async = 0;
	qp->dma_tx_prep_err = 0;
	qp->dma_rx_prep_err = 0;
}

static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
	struct ntb_transport_ctx *nt = qp->transport;
	struct pci_dev *pdev = nt->ndev->pdev;

	dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);

	cancel_delayed_work_sync(&qp->link_work);
	ntb_qp_link_down_reset(qp);

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, qp->link_is_up);
}

static void ntb_qp_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_cleanup);
	struct ntb_transport_ctx *nt = qp->transport;

	ntb_qp_link_cleanup(qp);

	if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	schedule_work(&qp->link_cleanup);
}

static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_qp *qp;
	u64 qp_bitmap_alloc;
	int i;

	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->qp_count; i++)
		if (qp_bitmap_alloc & BIT_ULL(i)) {
			qp = &nt->qp_vec[i];
			ntb_qp_link_cleanup(qp);
			cancel_work_sync(&qp->link_cleanup);
			cancel_delayed_work_sync(&qp->link_work);
		}

	if (!nt->link_is_up)
		cancel_delayed_work_sync(&nt->link_work);

	/* The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < MAX_SPAD; i++)
		ntb_spad_write(nt->ndev, i, 0);
}

static void ntb_transport_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_cleanup);

	ntb_transport_link_cleanup(nt);
}

static void ntb_transport_event_callback(void *data)
{
	struct ntb_transport_ctx *nt = data;

	if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work, 0);
	else
		schedule_work(&nt->link_cleanup);
}

static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_work.work);
	struct ntb_dev *ndev = nt->ndev;
	struct pci_dev *pdev = ndev->pdev;
	resource_size_t size;
	u32 val;
	int rc, i, spad;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < nt->mw_count; i++) {
		size = nt->mw_vec[i].phys_size;

		if (max_mw_size && size > max_mw_size)
			size = max_mw_size;

		spad = MW0_SZ_HIGH + (i * 2);
		ntb_peer_spad_write(ndev, spad, upper_32_bits(size));

		spad = MW0_SZ_LOW + (i * 2);
		ntb_peer_spad_write(ndev, spad, lower_32_bits(size));
	}

	ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count);

	ntb_peer_spad_write(ndev, NUM_QPS, nt->qp_count);

	ntb_peer_spad_write(ndev, VERSION, NTB_TRANSPORT_VERSION);

	/* Query the remote side for its info */
	val = ntb_spad_read(ndev, VERSION);
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);
	if (val != NTB_TRANSPORT_VERSION)
		goto out;

	val = ntb_spad_read(ndev, NUM_QPS);
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
	if (val != nt->qp_count)
		goto out;

	val = ntb_spad_read(ndev, NUM_MWS);
	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
	if (val != nt->mw_count)
		goto out;

	for (i = 0; i < nt->mw_count; i++) {
		u64 val64;

		val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
		val64 = (u64)val << 32;

		val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2));
		val64 |= val;

		dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);

		rc = ntb_set_mw(nt, i, val64);
		if (rc)
			goto out1;
	}

	nt->link_is_up = true;

	for (i = 0; i < nt->qp_count; i++) {
		struct ntb_transport_qp *qp = &nt->qp_vec[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready)
			schedule_delayed_work(&qp->link_work, 0);
	}

	return;

out1:
	for (i = 0; i < nt->mw_count; i++)
		ntb_free_mw(nt, i);
out:
	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_work.work);
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_transport_ctx *nt = qp->transport;
	int val;

	WARN_ON(!nt->link_is_up);

	val = ntb_spad_read(nt->ndev, QP_LINKS);

	ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num));

	/* query remote spad for qp ready bits */
	ntb_peer_spad_read(nt->ndev, QP_LINKS);
	dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (val & BIT(qp->qp_num)) {
		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		qp->link_is_up = true;
		qp->active = true;

		if (qp->event_handler)
			qp->event_handler(qp->cb_data, qp->link_is_up);

		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);
	} else if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
				    unsigned int qp_num)
{
	struct ntb_transport_qp *qp;
	phys_addr_t mw_base;
	resource_size_t mw_size;
	unsigned int num_qps_mw, tx_size;
	unsigned int mw_num, mw_count, qp_count;
	u64 qp_offset;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);

	qp = &nt->qp_vec[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ndev = nt->ndev;
	qp->client_ready = false;
	qp->event_handler = NULL;
	ntb_qp_link_down_reset(qp);

	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	mw_base = nt->mw_vec[mw_num].phys_addr;
	mw_size = nt->mw_vec[mw_num].phys_size;

	tx_size = (unsigned int)mw_size / num_qps_mw;
	qp_offset = tx_size * (qp_num / mw_count);

	qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
	if (!qp->tx_mw)
		return -EINVAL;

	qp->tx_mw_phys = mw_base + qp_offset;
	if (!qp->tx_mw_phys)
		return -EINVAL;

	tx_size -= sizeof(struct ntb_rx_info);
	qp->rx_info = qp->tx_mw + tx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;

	if (nt->debugfs_node_dir) {
		char debugfs_name[4];

		snprintf(debugfs_name, 4, "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
						     nt->debugfs_node_dir);

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
							qp->debugfs_dir, qp,
							&ntb_qp_debugfs_stats);
	} else {
		qp->debugfs_dir = NULL;
		qp->debugfs_stats = NULL;
	}

	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);

	spin_lock_init(&qp->ntb_rx_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_post_q);
	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);

	tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
		     (unsigned long)qp);

	return 0;
}

static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
{
	struct ntb_transport_ctx *nt;
	struct ntb_transport_mw *mw;
	unsigned int mw_count, qp_count;
	u64 qp_bitmap;
	int node;
	int rc, i;

	if (ntb_db_is_unsafe(ndev))
		dev_dbg(&ndev->dev,
			"doorbell is unsafe, proceed anyway...\n");
	if (ntb_spad_is_unsafe(ndev))
		dev_dbg(&ndev->dev,
			"scratchpad is unsafe, proceed anyway...\n");

	node = dev_to_node(&ndev->dev);

	nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
	if (!nt)
		return -ENOMEM;

	nt->ndev = ndev;

	mw_count = ntb_mw_count(ndev);

	nt->mw_count = mw_count;

	nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
				  GFP_KERNEL, node);
	if (!nt->mw_vec) {
		rc = -ENOMEM;
		goto err;
	}

	for (i = 0; i < mw_count; i++) {
		mw = &nt->mw_vec[i];

		rc = ntb_mw_get_range(ndev, i, &mw->phys_addr, &mw->phys_size,
				      &mw->xlat_align, &mw->xlat_align_size);
		if (rc)
			goto err1;

		mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
		if (!mw->vbase) {
			rc = -ENOMEM;
			goto err1;
		}

		mw->buff_size = 0;
		mw->xlat_size = 0;
		mw->virt_addr = NULL;
		mw->dma_addr = 0;
	}

	qp_bitmap = ntb_db_valid_mask(ndev);

	qp_count = ilog2(qp_bitmap);
	if (max_num_clients && max_num_clients < qp_count)
		qp_count = max_num_clients;
	else if (mw_count < qp_count)
		qp_count = mw_count;

	qp_bitmap &= BIT_ULL(qp_count) - 1;

	nt->qp_count = qp_count;
	nt->qp_bitmap = qp_bitmap;
	nt->qp_bitmap_free = qp_bitmap;

	nt->qp_vec = kzalloc_node(qp_count * sizeof(*nt->qp_vec),
				  GFP_KERNEL, node);
	if (!nt->qp_vec) {
		rc = -ENOMEM;
		goto err1;
	}

	if (nt_debugfs_dir) {
		nt->debugfs_node_dir =
			debugfs_create_dir(pci_name(ndev->pdev),
					   nt_debugfs_dir);
	}

	for (i = 0; i < qp_count; i++) {
		rc = ntb_transport_init_queue(nt, i);
		if (rc)
			goto err2;
	}

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);

	rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
	if (rc)
		goto err2;

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);
	if (rc)
		goto err3;

	nt->link_is_up = false;
	ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ndev);

	return 0;

err3:
	ntb_clear_ctx(ndev);
err2:
	kfree(nt->qp_vec);
err1:
	while (i--) {
		mw = &nt->mw_vec[i];
		iounmap(mw->vbase);
	}
	kfree(nt->mw_vec);
err:
	kfree(nt);
	return rc;
}

static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
{
	struct ntb_transport_ctx *nt = ndev->ctx;
	struct ntb_transport_qp *qp;
	u64 qp_bitmap_alloc;
	int i;

	ntb_transport_link_cleanup(nt);
	cancel_work_sync(&nt->link_cleanup);
	cancel_delayed_work_sync(&nt->link_work);

	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

	/* verify that all the qp's are freed */
	for (i = 0; i < nt->qp_count; i++) {
		qp = &nt->qp_vec[i];
		if (qp_bitmap_alloc & BIT_ULL(i))
			ntb_transport_free_queue(qp);
		debugfs_remove_recursive(qp->debugfs_dir);
	}

	ntb_link_disable(ndev);
	ntb_clear_ctx(ndev);

	ntb_bus_remove(nt);

	for (i = nt->mw_count; i--; ) {
		ntb_free_mw(nt, i);
		iounmap(nt->mw_vec[i].vbase);
	}

	kfree(nt->qp_vec);
	kfree(nt->mw_vec);
	kfree(nt);
}

static void ntb_complete_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_queue_entry *entry;
	void *cb_data;
	unsigned int len;
	unsigned long irqflags;

	spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);

	while (!list_empty(&qp->rx_post_q)) {
		entry = list_first_entry(&qp->rx_post_q,
					 struct ntb_queue_entry, entry);
		if (!(entry->flags & DESC_DONE_FLAG))
			break;

		entry->rx_hdr->flags = 0;
		iowrite32(entry->index, &qp->rx_info->entry);

		cb_data = entry->cb_data;
		len = entry->len;

		list_move_tail(&entry->entry, &qp->rx_free_q);

		spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);

		if (qp->rx_handler && qp->client_ready)
			qp->rx_handler(qp, qp->cb_data, cb_data, len);

		spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
	}

	spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
}

static void ntb_rx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;

	entry->flags |= DESC_DONE_FLAG;

	ntb_complete_rxc(entry->qp);
}

static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
{
	void *buf = entry->buf;
	size_t len = entry->len;

	memcpy(buf, offset, len);

	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();

	ntb_rx_copy_callback(entry);
}

static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
{
	struct dma_async_tx_descriptor *txd;
	struct ntb_transport_qp *qp = entry->qp;
	struct dma_chan *chan = qp->rx_dma_chan;
	struct dma_device *device;
	size_t pay_off, buff_off, len;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	void *buf = entry->buf;
	int retries = 0;

	len = entry->len;

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err;

	device = chan->device;
	pay_off = (size_t)offset & ~PAGE_MASK;
	buff_off = (size_t)buf & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
		goto err;

	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
	if (!unmap)
		goto err;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
				      pay_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[1]))
		goto err_get_unmap;

	unmap->from_cnt = 1;

	for (retries = 0; retries < DMA_RETRIES; retries++) {
		txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
						     unmap->addr[0], len,
						     DMA_PREP_INTERRUPT);
		if (txd)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(DMA_OUT_RESOURCE_TO);
	}

	if (!txd) {
		qp->dma_rx_prep_err++;
		goto err_get_unmap;
	}

	txd->callback = ntb_rx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	qp->last_cookie = cookie;

	qp->rx_async++;

	return;

err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err:
	ntb_memcpy_rx(entry, offset);
	qp->rx_memcpy++;
}

static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

	dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
		qp->qp_num, hdr->ver, hdr->len, hdr->flags);

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
		qp->rx_ring_empty++;
		return -EAGAIN;
	}

	if (hdr->flags & LINK_DOWN_FLAG) {
		dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
		ntb_qp_link_down(qp);
		hdr->flags = 0;
		return -EAGAIN;
	}

	if (hdr->ver != (u32)qp->rx_pkts) {
		dev_dbg(&qp->ndev->pdev->dev,
			"version mismatch, expected %llu - got %u\n",
			qp->rx_pkts, hdr->ver);
		qp->rx_err_ver++;
		return -EIO;
	}

	entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
	if (!entry) {
		dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
		qp->rx_err_no_buf++;
		return -EAGAIN;
	}

	entry->rx_hdr = hdr;
	entry->index = qp->rx_index;

	if (hdr->len > entry->len) {
		dev_dbg(&qp->ndev->pdev->dev,
			"receive buffer overflow! Wanted %d got %d\n",
			hdr->len, entry->len);
		qp->rx_err_oflow++;

		entry->len = -EIO;
		entry->flags |= DESC_DONE_FLAG;

		ntb_complete_rxc(qp);
	} else {
		dev_dbg(&qp->ndev->pdev->dev,
			"RX OK index %u ver %u size %d into buf size %d\n",
			qp->rx_index, hdr->ver, hdr->len, entry->len);

		qp->rx_bytes += hdr->len;
		qp->rx_pkts++;

		entry->len = hdr->len;

		ntb_async_rx(entry, offset);
	}

	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;

	return 0;
}

static void ntb_transport_rxc_db(unsigned long data)
{
	struct ntb_transport_qp *qp = (void *)data;
	int rc, i;

	dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
		__func__, qp->qp_num);

	/* Limit the number of packets processed in a single interrupt to
	 * provide fairness to others
	 */
	for (i = 0; i < qp->rx_max_entry; i++) {
		rc = ntb_process_rxc(qp);
		if (rc)
			break;
	}

	if (i && qp->rx_dma_chan)
		dma_async_issue_pending(qp->rx_dma_chan);

	if (i == qp->rx_max_entry) {
		/* there is more work to do */
		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);
	} else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
		/* the doorbell bit is set: clear it */
		ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
		/* ntb_db_read ensures ntb_db_clear write is committed */
		ntb_db_read(qp->ndev);

		/* an interrupt may have arrived between finishing
		 * ntb_process_rxc and clearing the doorbell bit:
		 * there might be some more work to do.
		 */
		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);
	}
}

static void ntb_tx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;

	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

	ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
				       entry->len);
	}

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}

static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
#ifdef ARCH_HAS_NOCACHE_UACCESS
	/*
	 * Using non-temporal mov to improve performance on non-cached
	 * writes, even though we aren't actually copying from user space.
	 */
	__copy_from_user_inatomic_nocache(offset, entry->buf, entry->len);
#else
	memcpy_toio(offset, entry->buf, entry->len);
#endif

	/* Ensure that the data is fully copied out before setting the flags */
	wmb();

	ntb_tx_copy_callback(entry);
}

static void ntb_async_tx(struct ntb_transport_qp *qp,
			 struct ntb_queue_entry *entry)
{
	struct ntb_payload_header __iomem *hdr;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = qp->tx_dma_chan;
	struct dma_device *device;
	size_t dest_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_addr_t dest;
	dma_cookie_t cookie;
	void __iomem *offset;
	size_t len = entry->len;
	void *buf = entry->buf;
	int retries = 0;

	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
	entry->tx_hdr = hdr;

	iowrite32(entry->len, &hdr->len);
	iowrite32((u32)qp->tx_pkts, &hdr->ver);

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err;

	device = chan->device;
	dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
	buff_off = (size_t)buf & ~PAGE_MASK;
	dest_off = (size_t)dest & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
		goto err;

	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
	if (!unmap)
		goto err;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	for (retries = 0; retries < DMA_RETRIES; retries++) {
		txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0],
						     len, DMA_PREP_INTERRUPT);
		if (txd)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(DMA_OUT_RESOURCE_TO);
	}

	if (!txd) {
		qp->dma_tx_prep_err++;
		goto err_get_unmap;
	}

	txd->callback = ntb_tx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	dma_async_issue_pending(chan);
	qp->tx_async++;

	return;
err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err:
	ntb_memcpy_tx(entry, offset);
	qp->tx_memcpy++;
}

static int ntb_process_tx(struct ntb_transport_qp *qp,
			  struct ntb_queue_entry *entry)
{
	if (qp->tx_index == qp->remote_rx_info->entry) {
		qp->tx_ring_full++;
		return -EAGAIN;
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
		return 0;
	}

	ntb_async_tx(qp, entry);

	qp->tx_index++;
	qp->tx_index %= qp->tx_max_entry;

	qp->tx_pkts++;

	return 0;
}

static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_queue_entry *entry;
	int i, rc;

	if (!qp->link_is_up)
		return;

	dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num);

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry)
			break;
		msleep(100);
	}

	if (!entry)
		return;

	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = LINK_DOWN_FLAG;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
			qp->qp_num);

	ntb_qp_link_down_reset(qp);
}

static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
{
	return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
}

/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @data: pointer for callback data
 * @client_dev: &struct device pointer
 * @handlers: pointer to various ntb queue (callback) handlers
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive.  The receive callback routine will be
 * used to pass up data when the transport has received it on the queue.  The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct device *client_dev,
			   const struct ntb_queue_handlers *handlers)
{
	struct ntb_dev *ndev;
	struct pci_dev *pdev;
	struct ntb_transport_ctx *nt;
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;
	u64 qp_bit;
	unsigned int free_queue;
	dma_cap_mask_t dma_mask;
	int node;
	int i;

	ndev = dev_ntb(client_dev->parent);
	pdev = ndev->pdev;
	nt = ndev->ctx;

	node = dev_to_node(&ndev->dev);

	free_queue = ffs(nt->qp_bitmap);
	if (!free_queue)
		goto err;

	/* decrement free_queue to make it zero based */
	free_queue--;

	qp = &nt->qp_vec[free_queue];
	qp_bit = BIT_ULL(qp->qp_num);

	nt->qp_bitmap_free &= ~qp_bit;

	qp->cb_data = data;
	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	dma_cap_zero(dma_mask);
	dma_cap_set(DMA_MEMCPY, dma_mask);

	if (use_dma) {
		qp->tx_dma_chan =
			dma_request_channel(dma_mask, ntb_dma_filter_fn,
					    (void *)(unsigned long)node);
		if (!qp->tx_dma_chan)
			dev_info(&pdev->dev, "Unable to allocate TX DMA channel\n");

		qp->rx_dma_chan =
			dma_request_channel(dma_mask, ntb_dma_filter_fn,
					    (void *)(unsigned long)node);
		if (!qp->rx_dma_chan)
			dev_info(&pdev->dev, "Unable to allocate RX DMA channel\n");
	} else {
		qp->tx_dma_chan = NULL;
		qp->rx_dma_chan = NULL;
	}

	dev_dbg(&pdev->dev, "Using %s memcpy for TX\n",
		qp->tx_dma_chan ? "DMA" : "CPU");

	dev_dbg(&pdev->dev, "Using %s memcpy for RX\n",
		qp->rx_dma_chan ? "DMA" : "CPU");

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
		if (!entry)
			goto err1;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
			     &qp->rx_free_q);
	}

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
		if (!entry)
			goto err2;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
	}

	ntb_db_clear(qp->ndev, qp_bit);
	ntb_db_clear_mask(qp->ndev, qp_bit);

	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);

	return qp;

err2:
	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);
err1:
	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
		kfree(entry);
	if (qp->tx_dma_chan)
		dma_release_channel(qp->tx_dma_chan);
	if (qp->rx_dma_chan)
		dma_release_channel(qp->rx_dma_chan);
	nt->qp_bitmap_free |= qp_bit;
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(ntb_transport_create_queue);

/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees NTB transport queue
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	struct ntb_queue_entry *entry;
	u64 qp_bit;

	if (!qp)
		return;

	pdev = qp->ndev->pdev;

	qp->active = false;

	if (qp->tx_dma_chan) {
		struct dma_chan *chan = qp->tx_dma_chan;
		/* Putting the dma_chan to NULL will force any new traffic to be
		 * processed by the CPU instead of the DMA engine
		 */
		qp->tx_dma_chan = NULL;

		/* Try to be nice and wait for any queued DMA engine
		 * transactions to process before smashing it with a rock
		 */
		dma_sync_wait(chan, qp->last_cookie);
		dmaengine_terminate_all(chan);
		dma_release_channel(chan);
	}

	if (qp->rx_dma_chan) {
		struct dma_chan *chan = qp->rx_dma_chan;
		/* Putting the dma_chan to NULL will force any new traffic to be
		 * processed by the CPU instead of the DMA engine
		 */
		qp->rx_dma_chan = NULL;

		/* Try to be nice and wait for any queued DMA engine
		 * transactions to process before smashing it with a rock
		 */
		dma_sync_wait(chan, qp->last_cookie);
		dmaengine_terminate_all(chan);
		dma_release_channel(chan);
	}

	qp_bit = BIT_ULL(qp->qp_num);

	ntb_db_set_mask(qp->ndev, qp_bit);
	tasklet_kill(&qp->rxc_db_work);

	cancel_delayed_work_sync(&qp->link_work);

	qp->cb_data = NULL;
	qp->rx_handler = NULL;
	qp->tx_handler = NULL;
	qp->event_handler = NULL;

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
		kfree(entry);

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
		dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
		dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);

	qp->transport->qp_bitmap_free |= qp_bit;

	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);

/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB transport layer queue from which the entry is to be dequeued
 * @len: pointer to variable to write enqueued buffers length
 *
 * Dequeues unused buffers from receive queue.  Should only be used during
 * shutdown of qp.
 *
 * RETURNS: NULL on error, or a pointer to the dequeued buffer's cb data on
 * success.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
	struct ntb_queue_entry *entry;
	void *buf;

	if (!qp || qp->client_ready)
		return NULL;

	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
	if (!entry)
		return NULL;

	buf = entry->cb_data;
	*len = entry->len;

	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);

	return buf;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
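
/*
 * Illustrative sketch (not compiled): the teardown sequence a client is
 * expected to follow, using the calls above and below.  The names prefixed
 * with "example_" are hypothetical.
 */
#if 0	/* example only */
static void example_client_shutdown(struct ntb_transport_qp *qp)
{
	unsigned int len;
	void *cb_data;

	/* stop new traffic and tell the peer we are going away */
	ntb_transport_link_down(qp);

	/* reclaim any receive buffers that were still posted */
	while ((cb_data = ntb_transport_rx_remove(qp, &len)) != NULL)
		example_free_rx_buffer(cb_data, len);	/* hypothetical helper */

	/* finally release the queue pair itself */
	ntb_transport_free_queue(qp);
}
#endif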

/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue into which an NTB
 * payload can be received.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;

	if (!qp)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
	if (!entry)
		return -ENOMEM;

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);

	if (qp->active)
		tasklet_schedule(&qp->rxc_db_work);

	return 0;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);

/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	if (!qp || !qp->link_is_up || !len)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (!entry) {
		qp->tx_err_no_buf++;
		return -EBUSY;
	}

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);

/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
	if (!qp)
		return;

	qp->client_ready = true;

	if (qp->transport->link_is_up)
		schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);
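
/*
 * Illustrative sketch (not compiled): how a client typically drives the
 * queue-pair API above.  Handler bodies and the "example_" names are
 * hypothetical; the call sequence (create the queue, post receive buffers,
 * then announce readiness with ntb_transport_link_up()) follows the
 * expectations of this transport.
 */
#if 0	/* example only */
static void example_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
			       void *data, int len)
{
	/* "data" is the cb pointer given to ntb_transport_rx_enqueue() */
}

static void example_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
			       void *data, int len)
{
	/* the transmit buffer may be reused or freed here */
}

static void example_event_handler(void *data, int link_is_up)
{
	/* react to queue-pair link up/down transitions */
}

static const struct ntb_queue_handlers example_handlers = {
	.rx_handler = example_rx_handler,
	.tx_handler = example_tx_handler,
	.event_handler = example_event_handler,
};

static int example_open(struct device *client_dev, void *priv, void *rx_buf,
			unsigned int buf_len)
{
	struct ntb_transport_qp *qp;
	int rc;

	qp = ntb_transport_create_queue(priv, client_dev, &example_handlers);
	if (!qp)
		return -EINVAL;

	/* post at least one receive buffer before raising the link */
	rc = ntb_transport_rx_enqueue(qp, rx_buf, rx_buf, buf_len);
	if (rc) {
		ntb_transport_free_queue(qp);
		return rc;
	}

	ntb_transport_link_up(qp);

	/* once the link is up, payloads are sent with:
	 *	ntb_transport_tx_enqueue(qp, tx_buf, tx_buf, tx_len);
	 */
	return 0;
}
#endif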

/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified.  It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	int val;

	if (!qp)
		return;

	qp->client_ready = false;

	val = ntb_spad_read(qp->ndev, QP_LINKS);

	ntb_peer_spad_write(qp->ndev, QP_LINKS,
			    val & ~BIT(qp->qp_num));

	if (qp->link_is_up)
		ntb_send_link_down(qp);
	else
		cancel_delayed_work_sync(&qp->link_work);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_down);

/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
	if (!qp)
		return false;

	return qp->link_is_up;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);

/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
	if (!qp)
		return 0;

	return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);

/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
	unsigned int max_size;
	unsigned int copy_align;
	struct dma_chan *rx_chan, *tx_chan;

	if (!qp)
		return 0;

	rx_chan = qp->rx_dma_chan;
	tx_chan = qp->tx_dma_chan;

	copy_align = max(rx_chan ? rx_chan->device->copy_align : 0,
			 tx_chan ? tx_chan->device->copy_align : 0);

	/* If DMA engine usage is possible, try to find the max size for that */
	max_size = qp->tx_max_frame - sizeof(struct ntb_payload_header);
	max_size = round_down(max_size, 1 << copy_align);

	return max_size;
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);

unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
{
	unsigned int head = qp->tx_index;
	unsigned int tail = qp->remote_rx_info->entry;

	return tail > head ? tail - head : qp->tx_max_entry + tail - head;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);

static void ntb_transport_doorbell_callback(void *data, int vector)
{
	struct ntb_transport_ctx *nt = data;
	struct ntb_transport_qp *qp;
	u64 db_bits;
	unsigned int qp_num;

	db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
		   ntb_db_vector_mask(nt->ndev, vector));

	while (db_bits) {
		qp_num = __ffs(db_bits);
		qp = &nt->qp_vec[qp_num];

		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);

		db_bits &= ~BIT_ULL(qp_num);
	}
}

static const struct ntb_ctx_ops ntb_transport_ops = {
	.link_event = ntb_transport_event_callback,
	.db_event = ntb_transport_doorbell_callback,
};

static struct ntb_client ntb_transport_client = {
	.ops = {
		.probe = ntb_transport_probe,
		.remove = ntb_transport_free,
	},
};

static int __init ntb_transport_init(void)
{
	int rc;

	pr_info("%s, version %s\n", NTB_TRANSPORT_DESC, NTB_TRANSPORT_VER);

	if (debugfs_initialized())
		nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	rc = bus_register(&ntb_transport_bus);
	if (rc)
		goto err_bus;

	rc = ntb_register_client(&ntb_transport_client);
	if (rc)
		goto err_client;

	return 0;

err_client:
	bus_unregister(&ntb_transport_bus);
err_bus:
	debugfs_remove_recursive(nt_debugfs_dir);
	return rc;
}
module_init(ntb_transport_init);

static void __exit ntb_transport_exit(void)
{
	debugfs_remove_recursive(nt_debugfs_dir);

	ntb_unregister_client(&ntb_transport_client);
	bus_unregister(&ntb_transport_bus);
}
module_exit(ntb_transport_exit);