/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Transport Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "linux/ntb.h"
#include "linux/ntb_transport.h"

#define NTB_TRANSPORT_VERSION	4
#define NTB_TRANSPORT_VER	"4"
#define NTB_TRANSPORT_NAME	"ntb_transport"
#define NTB_TRANSPORT_DESC	"Software Queue-Pair Transport over NTB"
#define NTB_TRANSPORT_MIN_SPADS (MW0_SZ_HIGH + 2)

MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
MODULE_VERSION(NTB_TRANSPORT_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");

static unsigned int transport_mtu = 0x10000;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");

static bool use_dma;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");

static struct dentry *nt_debugfs_dir;

struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;
	int retries;
	int errors;
	unsigned int tx_index;
	unsigned int rx_index;

	struct ntb_transport_qp *qp;
	union {
		struct ntb_payload_header __iomem *tx_hdr;
		struct ntb_payload_header *rx_hdr;
	};
};

struct ntb_rx_info {
	unsigned int entry;
};

struct ntb_transport_qp {
	struct ntb_transport_ctx *transport;
	struct ntb_dev *ndev;
	void *cb_data;
	struct dma_chan *tx_dma_chan;
	struct dma_chan *rx_dma_chan;

	bool client_ready;
	bool link_is_up;
	bool active;

	u8 qp_num;	/* Only 64 QP's are allowed.  0-63 */
	u64 qp_bit;

	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void __iomem *tx_mw;
	dma_addr_t tx_mw_phys;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head rx_post_q;
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	/* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
	spinlock_t ntb_rx_q_lock;
	void *rx_buff;
	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;
	unsigned int rx_alloc_entry;
	dma_cookie_t last_cookie;
	struct tasklet_struct rxc_db_work;

	void (*event_handler)(void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 rx_memcpy;
	u64 rx_async;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
	u64 tx_err_no_buf;
	u64 tx_memcpy;
	u64 tx_async;
};

struct ntb_transport_mw {
	phys_addr_t phys_addr;
	resource_size_t phys_size;
	resource_size_t xlat_align;
	resource_size_t xlat_align_size;
	void __iomem *vbase;
	size_t xlat_size;
	size_t buff_size;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
	struct list_head entry;
	struct ntb_transport_ctx *nt;
	struct device dev;
};

struct ntb_transport_ctx {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_dev *ndev;

	struct ntb_transport_mw *mw_vec;
	struct ntb_transport_qp *qp_vec;
	unsigned int mw_count;
	unsigned int qp_count;
	u64 qp_bitmap;
	u64 qp_bitmap_free;

	bool link_is_up;
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_node_dir;
};

enum {
	DESC_DONE_FLAG = BIT(0),
	LINK_DOWN_FLAG = BIT(1),
};

struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};

enum {
	VERSION = 0,
	QP_LINKS,
	NUM_QPS,
	NUM_MWS,
	MW0_SZ_HIGH,
	MW0_SZ_LOW,
};

#define dev_client_dev(__dev) \
	container_of((__dev), struct ntb_transport_client_dev, dev)

#define drv_client(__drv) \
	container_of((__drv), struct ntb_transport_client, driver)

#define QP_TO_MW(nt, qp)	((qp) % nt->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10
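
/*
 * Illustrative note (added for clarity, not from the original source):
 * queue pairs are striped across the available memory windows by
 * QP_TO_MW() above. For example, with mw_count = 2, qp 0 maps to MW0,
 * qp 1 to MW1, qp 2 back to MW0, and so on.
 */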

static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client;
static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
			       struct ntb_queue_entry *entry);
static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset);
static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset);
static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset);


static int ntb_transport_bus_match(struct device *dev,
				   struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_transport_bus_probe(struct device *dev)
{
	const struct ntb_transport_client *client;
	int rc = -EINVAL;

	get_device(dev);

	client = drv_client(dev->driver);
	rc = client->probe(dev);
	if (rc)
		put_device(dev);

	return rc;
}

static int ntb_transport_bus_remove(struct device *dev)
{
	const struct ntb_transport_client *client;

	client = drv_client(dev->driver);
	client->remove(dev);

	put_device(dev);

	return 0;
}

static struct bus_type ntb_transport_bus = {
	.name = "ntb_transport",
	.match = ntb_transport_bus_match,
	.probe = ntb_transport_bus_probe,
	.remove = ntb_transport_bus_remove,
};

static LIST_HEAD(ntb_transport_list);

static int ntb_bus_init(struct ntb_transport_ctx *nt)
{
	list_add_tail(&nt->entry, &ntb_transport_list);
	return 0;
}

static void ntb_bus_remove(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);
}

static void ntb_transport_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;

	client_dev = dev_client_dev(dev);
	kfree(client_dev);
}

/**
 * ntb_transport_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_transport_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport_ctx *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);

/**
 * ntb_transport_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_transport_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport_ctx *nt;
	int node;
	int rc, i = 0;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	list_for_each_entry(nt, &ntb_transport_list, entry) {
		struct device *dev;

		node = dev_to_node(&nt->ndev->dev);

		client_dev = kzalloc_node(sizeof(*client_dev),
					  GFP_KERNEL, node);
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s%d", device_name, i);
		dev->bus = &ntb_transport_bus;
		dev->release = ntb_transport_client_release;
		dev->parent = &nt->ndev->dev;

		rc = device_register(dev);
		if (rc) {
			kfree(client_dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
		i++;
	}

	return 0;

err:
	ntb_transport_unregister_client_dev(device_name);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);

/**
 * ntb_transport_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_register_client(struct ntb_transport_client *drv)
{
	drv->driver.bus = &ntb_transport_bus;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client);

/**
 * ntb_transport_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 */
void ntb_transport_unregister_client(struct ntb_transport_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);
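
/*
 * Example (illustrative sketch, not part of this driver): a client
 * registers a named device and a driver whose name matches it; the bus
 * match above compares the two by name prefix. The "my_client" name and
 * the probe/remove handlers below are hypothetical.
 *
 *	static struct ntb_transport_client my_client = {
 *		.driver = {
 *			.name = "my_client",
 *			.owner = THIS_MODULE,
 *		},
 *		.probe = my_client_probe,
 *		.remove = my_client_remove,
 *	};
 *
 *	static int __init my_client_init(void)
 *	{
 *		int rc = ntb_transport_register_client_dev("my_client");
 *		if (rc)
 *			return rc;
 *		rc = ntb_transport_register_client(&my_client);
 *		if (rc)
 *			ntb_transport_unregister_client_dev("my_client");
 *		return rc;
 *	}
 */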

static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char *buf;
	ssize_t ret, out_offset, out_count;

	qp = filp->private_data;

	if (!qp || !qp->link_is_up)
		return 0;

	out_count = 1000;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\nNTB QP stats:\n\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_async - \t%llu\n", qp->rx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff - \t0x%p\n", qp->rx_buff);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_index - \t%u\n", qp->rx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_max_entry - \t%u\n", qp->rx_max_entry);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_alloc_entry - \t%u\n\n", qp->rx_alloc_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_async - \t%llu\n", qp->tx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw - \t0x%p\n", qp->tx_mw);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_index (H) - \t%u\n", qp->tx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "RRI (T) - \t%u\n",
			       qp->remote_rx_info->entry);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_max_entry - \t%u\n", qp->tx_max_entry);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "free tx - \t%u\n",
			       ntb_transport_tx_free_entry(qp));

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Using TX DMA - \t%s\n",
			       qp->tx_dma_chan ? "Yes" : "No");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Using RX DMA - \t%s\n",
			       qp->rx_dma_chan ? "Yes" : "No");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "QP Link - \t%s\n",
			       qp->link_is_up ? "Up" : "Down");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\n");

	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_read,
};

static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (list_empty(list)) {
		entry = NULL;
		goto out;
	}
	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_del(&entry->entry);

out:
	spin_unlock_irqrestore(lock, flags);

	return entry;
}

static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
					   struct list_head *list,
					   struct list_head *to_list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);

	if (list_empty(list)) {
		entry = NULL;
	} else {
		entry = list_first_entry(list, struct ntb_queue_entry, entry);
		list_move_tail(&entry->entry, to_list);
	}

	spin_unlock_irqrestore(lock, flags);

	return entry;
}

static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
				     unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
	struct ntb_transport_mw *mw;
	struct ntb_dev *ndev = nt->ndev;
	struct ntb_queue_entry *entry;
	unsigned int rx_size, num_qps_mw;
	unsigned int mw_num, mw_count, qp_count;
	unsigned int i;
	int node;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	if (!mw->virt_addr)
		return -ENOMEM;

	if (mw_num < qp_count % mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
	qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
	rx_size -= sizeof(struct ntb_rx_info);

	qp->remote_rx_info = qp->rx_buff + rx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;

	/*
	 * Checking to see if we have more entries than the default.
	 * We should add additional entries if that is the case so we
	 * can be in sync with the transport frames.
	 */
	node = dev_to_node(&ndev->dev);
	for (i = qp->rx_alloc_entry; i < qp->rx_max_entry; i++) {
		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
		if (!entry)
			return -ENOMEM;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
			     &qp->rx_free_q);
		qp->rx_alloc_entry++;
	}

	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* setup the hdr offsets with 0's */
	for (i = 0; i < qp->rx_max_entry; i++) {
		void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
				sizeof(struct ntb_payload_header));
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
	qp->tx_index = 0;

	return 0;
}
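
/*
 * Illustrative note (added for clarity, not from the original source):
 * the per-qp slice of a memory window is carved into rx_max_entry frames
 * of rx_max_frame bytes, with the shared ntb_rx_info word at the very end
 * of the slice. Each frame carries its payload first and an
 * ntb_payload_header in its final bytes:
 *
 *	| frame 0: payload ... hdr | frame 1: payload ... hdr | ... | rx_info |
 *
 * For example, with a 64 KiB slice and the default transport_mtu of
 * 0x10000, the frame size is capped at half the usable slice, which
 * guarantees the two-buffer minimum noted above.
 */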

static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;

	if (!mw->virt_addr)
		return;

	ntb_mw_clear_trans(nt->ndev, num_mw);
	dma_free_coherent(&pdev->dev, mw->buff_size,
			  mw->virt_addr, mw->dma_addr);
	mw->xlat_size = 0;
	mw->buff_size = 0;
	mw->virt_addr = NULL;
}

static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
		      resource_size_t size)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;
	size_t xlat_size, buff_size;
	int rc;

	if (!size)
		return -EINVAL;

	xlat_size = round_up(size, mw->xlat_align_size);
	buff_size = round_up(size, mw->xlat_align);

	/* No need to re-setup */
	if (mw->xlat_size == xlat_size)
		return 0;

	if (mw->buff_size)
		ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be aligned */
	mw->xlat_size = xlat_size;
	mw->buff_size = buff_size;

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size,
					   &mw->dma_addr, GFP_KERNEL);
	if (!mw->virt_addr) {
		mw->xlat_size = 0;
		mw->buff_size = 0;
		dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
			buff_size);
		return -ENOMEM;
	}

	/*
	 * we must ensure that the memory address allocated is BAR size
	 * aligned in order for the XLAT register to take the value. This
	 * is a requirement of the hardware. It is recommended to setup CMA
	 * for BAR sizes equal or greater than 4MB.
	 */
	if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
		dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
			&mw->dma_addr);
		ntb_free_mw(nt, num_mw);
		return -ENOMEM;
	}

	/* Notify HW the memory location of the receive buffer */
	rc = ntb_mw_set_trans(nt->ndev, num_mw, mw->dma_addr, mw->xlat_size);
	if (rc) {
		dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw);
		ntb_free_mw(nt, num_mw);
		return -EIO;
	}

	return 0;
}

static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
{
	qp->link_is_up = false;
	qp->active = false;

	qp->tx_index = 0;
	qp->rx_index = 0;
	qp->rx_bytes = 0;
	qp->rx_pkts = 0;
	qp->rx_ring_empty = 0;
	qp->rx_err_no_buf = 0;
	qp->rx_err_oflow = 0;
	qp->rx_err_ver = 0;
	qp->rx_memcpy = 0;
	qp->rx_async = 0;
	qp->tx_bytes = 0;
	qp->tx_pkts = 0;
	qp->tx_ring_full = 0;
	qp->tx_err_no_buf = 0;
	qp->tx_memcpy = 0;
	qp->tx_async = 0;
}

static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
	struct ntb_transport_ctx *nt = qp->transport;
	struct pci_dev *pdev = nt->ndev->pdev;

	dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);

	cancel_delayed_work_sync(&qp->link_work);
	ntb_qp_link_down_reset(qp);

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, qp->link_is_up);
}

static void ntb_qp_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_cleanup);
	struct ntb_transport_ctx *nt = qp->transport;

	ntb_qp_link_cleanup(qp);

	if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	schedule_work(&qp->link_cleanup);
}

static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_qp *qp;
	u64 qp_bitmap_alloc;
	unsigned int i, count;

	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->qp_count; i++)
		if (qp_bitmap_alloc & BIT_ULL(i)) {
			qp = &nt->qp_vec[i];
			ntb_qp_link_cleanup(qp);
			cancel_work_sync(&qp->link_cleanup);
			cancel_delayed_work_sync(&qp->link_work);
		}

	if (!nt->link_is_up)
		cancel_delayed_work_sync(&nt->link_work);

	/* The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	count = ntb_spad_count(nt->ndev);
	for (i = 0; i < count; i++)
		ntb_spad_write(nt->ndev, i, 0);
}

static void ntb_transport_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_cleanup);

	ntb_transport_link_cleanup(nt);
}

static void ntb_transport_event_callback(void *data)
{
	struct ntb_transport_ctx *nt = data;

	if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work, 0);
	else
		schedule_work(&nt->link_cleanup);
}
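
/*
 * Illustrative note (added for clarity, not from the original source):
 * link negotiation below is a simple scratchpad handshake. Each side
 * writes its parameters into the peer's scratchpads in the order MW
 * sizes, NUM_MWS, NUM_QPS, VERSION, and then reads its own scratchpads
 * back in the reverse order, so a matching VERSION implies everything
 * written before it has landed. For example, a side with two 1 MiB
 * windows would write (second window uses the next scratchpad pair;
 * "MW1_SZ_*" names here are illustrative only):
 *
 *	spad[MW0_SZ_HIGH] = 0x0;	spad[MW0_SZ_LOW] = 0x100000;
 *	spad[MW1_SZ_HIGH] = 0x0;	spad[MW1_SZ_LOW] = 0x100000;
 *	spad[NUM_MWS] = 2;	spad[NUM_QPS] = qp_count;	spad[VERSION] = 4;
 */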

static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_work.work);
	struct ntb_dev *ndev = nt->ndev;
	struct pci_dev *pdev = ndev->pdev;
	resource_size_t size;
	u32 val;
	int rc = 0, i, spad;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < nt->mw_count; i++) {
		size = nt->mw_vec[i].phys_size;

		if (max_mw_size && size > max_mw_size)
			size = max_mw_size;

		spad = MW0_SZ_HIGH + (i * 2);
		ntb_peer_spad_write(ndev, spad, upper_32_bits(size));

		spad = MW0_SZ_LOW + (i * 2);
		ntb_peer_spad_write(ndev, spad, lower_32_bits(size));
	}

	ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count);

	ntb_peer_spad_write(ndev, NUM_QPS, nt->qp_count);

	ntb_peer_spad_write(ndev, VERSION, NTB_TRANSPORT_VERSION);

	/* Query the remote side for its info */
	val = ntb_spad_read(ndev, VERSION);
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);
	if (val != NTB_TRANSPORT_VERSION)
		goto out;

	val = ntb_spad_read(ndev, NUM_QPS);
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
	if (val != nt->qp_count)
		goto out;

	val = ntb_spad_read(ndev, NUM_MWS);
	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
	if (val != nt->mw_count)
		goto out;

	for (i = 0; i < nt->mw_count; i++) {
		u64 val64;

		val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
		val64 = (u64)val << 32;

		val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2));
		val64 |= val;

		dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);

		rc = ntb_set_mw(nt, i, val64);
		if (rc)
			goto out1;
	}

	nt->link_is_up = true;

	for (i = 0; i < nt->qp_count; i++) {
		struct ntb_transport_qp *qp = &nt->qp_vec[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready)
			schedule_delayed_work(&qp->link_work, 0);
	}

	return;

out1:
	for (i = 0; i < nt->mw_count; i++)
		ntb_free_mw(nt, i);

	/* if there's an actual failure, we should just bail */
	if (rc < 0) {
		ntb_link_disable(ndev);
		return;
	}

out:
	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_work.work);
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_transport_ctx *nt = qp->transport;
	int val;

	WARN_ON(!nt->link_is_up);

	val = ntb_spad_read(nt->ndev, QP_LINKS);

	ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num));

	/* query remote spad for qp ready bits */
	dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (val & BIT(qp->qp_num)) {
		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		qp->link_is_up = true;
		qp->active = true;

		if (qp->event_handler)
			qp->event_handler(qp->cb_data, qp->link_is_up);

		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);
	} else if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
				    unsigned int qp_num)
{
	struct ntb_transport_qp *qp;
	phys_addr_t mw_base;
	resource_size_t mw_size;
	unsigned int num_qps_mw, tx_size;
	unsigned int mw_num, mw_count, qp_count;
	u64 qp_offset;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);

	qp = &nt->qp_vec[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ndev = nt->ndev;
	qp->client_ready = false;
	qp->event_handler = NULL;
	ntb_qp_link_down_reset(qp);

	if (mw_num < qp_count % mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	mw_base = nt->mw_vec[mw_num].phys_addr;
	mw_size = nt->mw_vec[mw_num].phys_size;

	tx_size = (unsigned int)mw_size / num_qps_mw;
	qp_offset = tx_size * (qp_num / mw_count);

	qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
	if (!qp->tx_mw)
		return -EINVAL;

	qp->tx_mw_phys = mw_base + qp_offset;
	if (!qp->tx_mw_phys)
		return -EINVAL;

	tx_size -= sizeof(struct ntb_rx_info);
	qp->rx_info = qp->tx_mw + tx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;

	if (nt->debugfs_node_dir) {
		/* 8 bytes so "qp%d" still fits for two-digit qp numbers */
		char debugfs_name[8];

		snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
						     nt->debugfs_node_dir);

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
							qp->debugfs_dir, qp,
							&ntb_qp_debugfs_stats);
	} else {
		qp->debugfs_dir = NULL;
		qp->debugfs_stats = NULL;
	}

	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);

	spin_lock_init(&qp->ntb_rx_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_post_q);
	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);

	tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
		     (unsigned long)qp);

	return 0;
}

static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
{
	struct ntb_transport_ctx *nt;
	struct ntb_transport_mw *mw;
	unsigned int mw_count, qp_count, spad_count, max_mw_count_for_spads;
	u64 qp_bitmap;
	int node;
	int rc, i;

	mw_count = ntb_mw_count(ndev);

	if (ntb_db_is_unsafe(ndev))
		dev_dbg(&ndev->dev,
			"doorbell is unsafe, proceed anyway...\n");
	if (ntb_spad_is_unsafe(ndev))
		dev_dbg(&ndev->dev,
			"scratchpad is unsafe, proceed anyway...\n");

	node = dev_to_node(&ndev->dev);

	nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
	if (!nt)
		return -ENOMEM;

	nt->ndev = ndev;
	spad_count = ntb_spad_count(ndev);

	/* Limit the MW's based on the availability of scratchpads */

	if (spad_count < NTB_TRANSPORT_MIN_SPADS) {
		nt->mw_count = 0;
		rc = -EINVAL;
		goto err;
	}

	max_mw_count_for_spads = (spad_count - MW0_SZ_HIGH) / 2;
	nt->mw_count = min(mw_count, max_mw_count_for_spads);

	nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
				  GFP_KERNEL, node);
	if (!nt->mw_vec) {
		rc = -ENOMEM;
		goto err;
	}

	for (i = 0; i < mw_count; i++) {
		mw = &nt->mw_vec[i];

		rc = ntb_mw_get_range(ndev, i, &mw->phys_addr, &mw->phys_size,
				      &mw->xlat_align, &mw->xlat_align_size);
		if (rc)
			goto err1;

		mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
		if (!mw->vbase) {
			rc = -ENOMEM;
			goto err1;
		}

		mw->buff_size = 0;
		mw->xlat_size = 0;
		mw->virt_addr = NULL;
		mw->dma_addr = 0;
	}
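
	/*
	 * Illustrative note (added for clarity, not from the original
	 * source): the qp count below is derived from the doorbell valid
	 * mask. For example, a 16-bit mask of 0xffff gives
	 * ilog2(0xffff) = 15 queue pairs, which is then capped by
	 * max_num_clients (if set) or otherwise by mw_count.
	 */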
	qp_bitmap = ntb_db_valid_mask(ndev);

	qp_count = ilog2(qp_bitmap);
	if (max_num_clients && max_num_clients < qp_count)
		qp_count = max_num_clients;
	else if (nt->mw_count < qp_count)
		qp_count = nt->mw_count;

	qp_bitmap &= BIT_ULL(qp_count) - 1;

	nt->qp_count = qp_count;
	nt->qp_bitmap = qp_bitmap;
	nt->qp_bitmap_free = qp_bitmap;

	nt->qp_vec = kzalloc_node(qp_count * sizeof(*nt->qp_vec),
				  GFP_KERNEL, node);
	if (!nt->qp_vec) {
		rc = -ENOMEM;
		goto err1;
	}

	if (nt_debugfs_dir) {
		nt->debugfs_node_dir =
			debugfs_create_dir(pci_name(ndev->pdev),
					   nt_debugfs_dir);
	}

	for (i = 0; i < qp_count; i++) {
		rc = ntb_transport_init_queue(nt, i);
		if (rc)
			goto err2;
	}

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);

	rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
	if (rc)
		goto err2;

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);
	if (rc)
		goto err3;

	nt->link_is_up = false;
	ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ndev);

	return 0;

err3:
	ntb_clear_ctx(ndev);
err2:
	kfree(nt->qp_vec);
err1:
	while (i--) {
		mw = &nt->mw_vec[i];
		iounmap(mw->vbase);
	}
	kfree(nt->mw_vec);
err:
	kfree(nt);
	return rc;
}

static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
{
	struct ntb_transport_ctx *nt = ndev->ctx;
	struct ntb_transport_qp *qp;
	u64 qp_bitmap_alloc;
	int i;

	ntb_transport_link_cleanup(nt);
	cancel_work_sync(&nt->link_cleanup);
	cancel_delayed_work_sync(&nt->link_work);

	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

	/* verify that all the qp's are freed */
	for (i = 0; i < nt->qp_count; i++) {
		qp = &nt->qp_vec[i];
		if (qp_bitmap_alloc & BIT_ULL(i))
			ntb_transport_free_queue(qp);
		debugfs_remove_recursive(qp->debugfs_dir);
	}

	ntb_link_disable(ndev);
	ntb_clear_ctx(ndev);

	ntb_bus_remove(nt);

	for (i = nt->mw_count; i--; ) {
		ntb_free_mw(nt, i);
		iounmap(nt->mw_vec[i].vbase);
	}

	kfree(nt->qp_vec);
	kfree(nt->mw_vec);
	kfree(nt);
}

static void ntb_complete_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_queue_entry *entry;
	void *cb_data;
	unsigned int len;
	unsigned long irqflags;

	spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);

	while (!list_empty(&qp->rx_post_q)) {
		entry = list_first_entry(&qp->rx_post_q,
					 struct ntb_queue_entry, entry);
		if (!(entry->flags & DESC_DONE_FLAG))
			break;

		entry->rx_hdr->flags = 0;
		iowrite32(entry->rx_index, &qp->rx_info->entry);

		cb_data = entry->cb_data;
		len = entry->len;

		list_move_tail(&entry->entry, &qp->rx_free_q);

		spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);

		if (qp->rx_handler && qp->client_ready)
			qp->rx_handler(qp, qp->cb_data, cb_data, len);

		spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
	}

	spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
}

static void ntb_rx_copy_callback(void *data,
				 const struct dmaengine_result *res)
{
	struct ntb_queue_entry *entry = data;

	/* we need to check DMA results if we are using DMA */
	if (res) {
		enum dmaengine_tx_result dma_err = res->result;

		switch (dma_err) {
		case DMA_TRANS_READ_FAILED:
		case DMA_TRANS_WRITE_FAILED:
			entry->errors++;
			/* fall through - retry the copy via the CPU */
		case DMA_TRANS_ABORTED:
		{
			struct ntb_transport_qp *qp = entry->qp;
			void *offset = qp->rx_buff + qp->rx_max_frame *
					qp->rx_index;

			ntb_memcpy_rx(entry, offset);
			qp->rx_memcpy++;
			return;
		}

		case DMA_TRANS_NOERROR:
		default:
			break;
		}
	}

	entry->flags |= DESC_DONE_FLAG;

	ntb_complete_rxc(entry->qp);
}

static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
{
	void *buf = entry->buf;
	size_t len = entry->len;

	memcpy(buf, offset, len);

	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();

	ntb_rx_copy_callback(entry, NULL);
}

static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
{
	struct dma_async_tx_descriptor *txd;
	struct ntb_transport_qp *qp = entry->qp;
	struct dma_chan *chan = qp->rx_dma_chan;
	struct dma_device *device;
	size_t pay_off, buff_off, len;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	void *buf = entry->buf;

	len = entry->len;
	device = chan->device;
	pay_off = (size_t)offset & ~PAGE_MASK;
	buff_off = (size_t)buf & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
		goto err;

	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
	if (!unmap)
		goto err;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
				      pay_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[1]))
		goto err_get_unmap;

	unmap->from_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
					     unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_get_unmap;

	txd->callback_result = ntb_rx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	qp->last_cookie = cookie;

	qp->rx_async++;

	return 0;

err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err:
	return -ENXIO;
}

static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
{
	struct ntb_transport_qp *qp = entry->qp;
	struct dma_chan *chan = qp->rx_dma_chan;
	int res;

	if (!chan)
		goto err;

	if (entry->len < copy_bytes)
		goto err;

	res = ntb_async_rx_submit(entry, offset);
	if (res < 0)
		goto err;

	if (!entry->retries)
		qp->rx_async++;

	return;

err:
	ntb_memcpy_rx(entry, offset);
	qp->rx_memcpy++;
}
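
/*
 * Illustrative note (added for clarity, not from the original source):
 * copy_bytes is the CPU/DMA crossover point. With the default
 * copy_bytes = 1024, a 256-byte payload is copied synchronously by the
 * CPU (ntb_memcpy_rx), while a 64 KiB payload is handed to the DMA
 * engine when a channel is available; on any DMA setup failure the code
 * silently falls back to the CPU path.
 */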

static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

	dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
		qp->qp_num, hdr->ver, hdr->len, hdr->flags);

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
		qp->rx_ring_empty++;
		return -EAGAIN;
	}

	if (hdr->flags & LINK_DOWN_FLAG) {
		dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
		ntb_qp_link_down(qp);
		hdr->flags = 0;
		return -EAGAIN;
	}

	if (hdr->ver != (u32)qp->rx_pkts) {
		dev_dbg(&qp->ndev->pdev->dev,
			"version mismatch, expected %llu - got %u\n",
			qp->rx_pkts, hdr->ver);
		qp->rx_err_ver++;
		return -EIO;
	}

	entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
	if (!entry) {
		dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
		qp->rx_err_no_buf++;
		return -EAGAIN;
	}

	entry->rx_hdr = hdr;
	entry->rx_index = qp->rx_index;

	if (hdr->len > entry->len) {
		dev_dbg(&qp->ndev->pdev->dev,
			"receive buffer overflow! Wanted %d got %d\n",
			hdr->len, entry->len);
		qp->rx_err_oflow++;

		entry->len = -EIO;
		entry->flags |= DESC_DONE_FLAG;

		ntb_complete_rxc(qp);
	} else {
		dev_dbg(&qp->ndev->pdev->dev,
			"RX OK index %u ver %u size %d into buf size %d\n",
			qp->rx_index, hdr->ver, hdr->len, entry->len);

		qp->rx_bytes += hdr->len;
		qp->rx_pkts++;

		entry->len = hdr->len;

		ntb_async_rx(entry, offset);
	}

	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;

	return 0;
}

static void ntb_transport_rxc_db(unsigned long data)
{
	struct ntb_transport_qp *qp = (void *)data;
	int rc, i;

	dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
		__func__, qp->qp_num);

	/* Limit the number of packets processed in a single interrupt to
	 * provide fairness to others
	 */
	for (i = 0; i < qp->rx_max_entry; i++) {
		rc = ntb_process_rxc(qp);
		if (rc)
			break;
	}

	if (i && qp->rx_dma_chan)
		dma_async_issue_pending(qp->rx_dma_chan);

	if (i == qp->rx_max_entry) {
		/* there is more work to do */
		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);
	} else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
		/* the doorbell bit is set: clear it */
		ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
		/* ntb_db_read ensures ntb_db_clear write is committed */
		ntb_db_read(qp->ndev);

		/* an interrupt may have arrived between finishing
		 * ntb_process_rxc and clearing the doorbell bit:
		 * there might be some more work to do.
		 */
		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);
	}
}

static void ntb_tx_copy_callback(void *data,
				 const struct dmaengine_result *res)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;

	/* we need to check DMA results if we are using DMA */
	if (res) {
		enum dmaengine_tx_result dma_err = res->result;

		switch (dma_err) {
		case DMA_TRANS_READ_FAILED:
		case DMA_TRANS_WRITE_FAILED:
			entry->errors++;
			/* fall through - resubmit via the CPU */
		case DMA_TRANS_ABORTED:
		{
			void __iomem *offset =
				qp->tx_mw + qp->tx_max_frame *
				entry->tx_index;

			/* resubmit via CPU */
			ntb_memcpy_tx(entry, offset);
			qp->tx_memcpy++;
			return;
		}

		case DMA_TRANS_NOERROR:
		default:
			break;
		}
	}

	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

	ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
				       entry->len);
	}

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}

static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
#ifdef ARCH_HAS_NOCACHE_UACCESS
	/*
	 * Using non-temporal mov to improve performance on non-cached
	 * writes, even though we aren't actually copying from user space.
	 */
	__copy_from_user_inatomic_nocache(offset, entry->buf, entry->len);
#else
	memcpy_toio(offset, entry->buf, entry->len);
#endif

	/* Ensure that the data is fully copied out before setting the flags */
	wmb();

	ntb_tx_copy_callback(entry, NULL);
}

static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
			       struct ntb_queue_entry *entry)
{
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = qp->tx_dma_chan;
	struct dma_device *device;
	size_t len = entry->len;
	void *buf = entry->buf;
	size_t dest_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_addr_t dest;
	dma_cookie_t cookie;

	device = chan->device;
	dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
	buff_off = (size_t)buf & ~PAGE_MASK;
	dest_off = (size_t)dest & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
		goto err;

	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
	if (!unmap)
		goto err;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_get_unmap;

	txd->callback_result = ntb_tx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	dma_async_issue_pending(chan);

	return 0;
err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err:
	return -ENXIO;
}

static void ntb_async_tx(struct ntb_transport_qp *qp,
			 struct ntb_queue_entry *entry)
{
	struct ntb_payload_header __iomem *hdr;
	struct dma_chan *chan = qp->tx_dma_chan;
	void __iomem *offset;
	int res;

	entry->tx_index = qp->tx_index;
	offset = qp->tx_mw + qp->tx_max_frame * entry->tx_index;
	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
	entry->tx_hdr = hdr;

	iowrite32(entry->len, &hdr->len);
	iowrite32((u32)qp->tx_pkts, &hdr->ver);

	if (!chan)
		goto err;

	if (entry->len < copy_bytes)
		goto err;

	res = ntb_async_tx_submit(qp, entry);
	if (res < 0)
		goto err;

	if (!entry->retries)
		qp->tx_async++;

	return;

err:
	ntb_memcpy_tx(entry, offset);
	qp->tx_memcpy++;
}

static int ntb_process_tx(struct ntb_transport_qp *qp,
			  struct ntb_queue_entry *entry)
{
	if (qp->tx_index == qp->remote_rx_info->entry) {
		qp->tx_ring_full++;
		return -EAGAIN;
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
		return 0;
	}

	ntb_async_tx(qp, entry);

	qp->tx_index++;
	qp->tx_index %= qp->tx_max_entry;

	qp->tx_pkts++;

	return 0;
}

static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_queue_entry *entry;
	int i, rc;

	if (!qp->link_is_up)
		return;

	dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num);

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry)
			break;
		msleep(100);
	}

	if (!entry)
		return;

	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = LINK_DOWN_FLAG;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
			qp->qp_num);

	ntb_qp_link_down_reset(qp);
}

static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
{
	return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
}

/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @data: pointer for callback data
 * @client_dev: pointer to the NTB client device
 * @handlers: pointer to the receive, transmit, and event callback handlers
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive.  The receive callback routine will be
 * used to pass up data when the transport has received it on the queue.  The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct device *client_dev,
			   const struct ntb_queue_handlers *handlers)
{
	struct ntb_dev *ndev;
	struct pci_dev *pdev;
	struct ntb_transport_ctx *nt;
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;
	u64 qp_bit;
	unsigned int free_queue;
	dma_cap_mask_t dma_mask;
	int node;
	int i;

	ndev = dev_ntb(client_dev->parent);
	pdev = ndev->pdev;
	nt = ndev->ctx;

	node = dev_to_node(&ndev->dev);

	free_queue = ffs(nt->qp_bitmap_free);
	if (!free_queue)
		goto err;

	/* decrement free_queue to make it zero based */
	free_queue--;

	qp = &nt->qp_vec[free_queue];
	qp_bit = BIT_ULL(qp->qp_num);

	nt->qp_bitmap_free &= ~qp_bit;

	qp->cb_data = data;
	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	dma_cap_zero(dma_mask);
	dma_cap_set(DMA_MEMCPY, dma_mask);

	if (use_dma) {
		qp->tx_dma_chan =
			dma_request_channel(dma_mask, ntb_dma_filter_fn,
					    (void *)(unsigned long)node);
		if (!qp->tx_dma_chan)
			dev_info(&pdev->dev, "Unable to allocate TX DMA channel\n");

		qp->rx_dma_chan =
			dma_request_channel(dma_mask, ntb_dma_filter_fn,
					    (void *)(unsigned long)node);
		if (!qp->rx_dma_chan)
			dev_info(&pdev->dev, "Unable to allocate RX DMA channel\n");
	} else {
		qp->tx_dma_chan = NULL;
		qp->rx_dma_chan = NULL;
	}

	dev_dbg(&pdev->dev, "Using %s memcpy for TX\n",
		qp->tx_dma_chan ? "DMA" : "CPU");

	dev_dbg(&pdev->dev, "Using %s memcpy for RX\n",
		qp->rx_dma_chan ? "DMA" : "CPU");
"DMA" : "CPU"); 1811 1812 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { 1813 entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node); 1814 if (!entry) 1815 goto err1; 1816 1817 entry->qp = qp; 1818 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, 1819 &qp->rx_free_q); 1820 } 1821 qp->rx_alloc_entry = NTB_QP_DEF_NUM_ENTRIES; 1822 1823 for (i = 0; i < qp->tx_max_entry; i++) { 1824 entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node); 1825 if (!entry) 1826 goto err2; 1827 1828 entry->qp = qp; 1829 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, 1830 &qp->tx_free_q); 1831 } 1832 1833 ntb_db_clear(qp->ndev, qp_bit); 1834 ntb_db_clear_mask(qp->ndev, qp_bit); 1835 1836 dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num); 1837 1838 return qp; 1839 1840 err2: 1841 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) 1842 kfree(entry); 1843 err1: 1844 qp->rx_alloc_entry = 0; 1845 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) 1846 kfree(entry); 1847 if (qp->tx_dma_chan) 1848 dma_release_channel(qp->tx_dma_chan); 1849 if (qp->rx_dma_chan) 1850 dma_release_channel(qp->rx_dma_chan); 1851 nt->qp_bitmap_free |= qp_bit; 1852 err: 1853 return NULL; 1854 } 1855 EXPORT_SYMBOL_GPL(ntb_transport_create_queue); 1856 1857 /** 1858 * ntb_transport_free_queue - Frees NTB transport queue 1859 * @qp: NTB queue to be freed 1860 * 1861 * Frees NTB transport queue 1862 */ 1863 void ntb_transport_free_queue(struct ntb_transport_qp *qp) 1864 { 1865 struct pci_dev *pdev; 1866 struct ntb_queue_entry *entry; 1867 u64 qp_bit; 1868 1869 if (!qp) 1870 return; 1871 1872 pdev = qp->ndev->pdev; 1873 1874 qp->active = false; 1875 1876 if (qp->tx_dma_chan) { 1877 struct dma_chan *chan = qp->tx_dma_chan; 1878 /* Putting the dma_chan to NULL will force any new traffic to be 1879 * processed by the CPU instead of the DAM engine 1880 */ 1881 qp->tx_dma_chan = NULL; 1882 1883 /* Try to be nice and wait for any queued DMA engine 1884 * transactions to process before smashing it with a rock 1885 */ 1886 dma_sync_wait(chan, qp->last_cookie); 1887 dmaengine_terminate_all(chan); 1888 dma_release_channel(chan); 1889 } 1890 1891 if (qp->rx_dma_chan) { 1892 struct dma_chan *chan = qp->rx_dma_chan; 1893 /* Putting the dma_chan to NULL will force any new traffic to be 1894 * processed by the CPU instead of the DAM engine 1895 */ 1896 qp->rx_dma_chan = NULL; 1897 1898 /* Try to be nice and wait for any queued DMA engine 1899 * transactions to process before smashing it with a rock 1900 */ 1901 dma_sync_wait(chan, qp->last_cookie); 1902 dmaengine_terminate_all(chan); 1903 dma_release_channel(chan); 1904 } 1905 1906 qp_bit = BIT_ULL(qp->qp_num); 1907 1908 ntb_db_set_mask(qp->ndev, qp_bit); 1909 tasklet_kill(&qp->rxc_db_work); 1910 1911 cancel_delayed_work_sync(&qp->link_work); 1912 1913 qp->cb_data = NULL; 1914 qp->rx_handler = NULL; 1915 qp->tx_handler = NULL; 1916 qp->event_handler = NULL; 1917 1918 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) 1919 kfree(entry); 1920 1921 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) { 1922 dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n"); 1923 kfree(entry); 1924 } 1925 1926 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) { 1927 dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n"); 1928 kfree(entry); 1929 } 1930 1931 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) 1932 kfree(entry); 1933 1934 qp->transport->qp_bitmap_free |= qp_bit; 1935 1936 
	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);

/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB queue from which the entry is to be removed
 * @len: pointer to variable to write enqueued buffers length
 *
 * Dequeues unused buffers from receive queue.  Should only be used during
 * shutdown of qp.
 *
 * RETURNS: NULL error value on error, or void* for success.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
	struct ntb_queue_entry *entry;
	void *buf;

	if (!qp || qp->client_ready)
		return NULL;

	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
	if (!entry)
		return NULL;

	buf = entry->cb_data;
	*len = entry->len;

	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);

	return buf;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);

/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue, into which an NTB
 * payload can be received.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;

	if (!qp)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
	if (!entry)
		return -ENOMEM;

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;
	entry->retries = 0;
	entry->errors = 0;
	entry->rx_index = 0;

	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);

	if (qp->active)
		tasklet_schedule(&qp->rxc_db_work);

	return 0;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);

/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which a NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	if (!qp || !qp->link_is_up || !len)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (!entry) {
		qp->tx_err_no_buf++;
		return -EBUSY;
	}

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;
	entry->errors = 0;
	entry->retries = 0;
	entry->tx_index = 0;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
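
/*
 * Example (illustrative sketch, not part of this driver): a typical
 * client flow from its probe callback. The my_* names, MY_BUF_SIZE, and
 * the handler bodies are hypothetical; error handling is abbreviated.
 *
 *	static void my_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
 *				  void *data, int len)
 *	{
 *		(consume up to len bytes of data, then repost the buffer)
 *		ntb_transport_rx_enqueue(qp, data, data, MY_BUF_SIZE);
 *	}
 *
 *	static const struct ntb_queue_handlers my_handlers = {
 *		.rx_handler = my_rx_handler,
 *		.tx_handler = my_tx_handler,
 *		.event_handler = my_event_handler,
 *	};
 *
 *	qp = ntb_transport_create_queue(my_ctx, client_dev, &my_handlers);
 *	for (i = 0; i < my_num_bufs; i++) {
 *		buf = kmalloc(MY_BUF_SIZE, GFP_KERNEL);
 *		ntb_transport_rx_enqueue(qp, buf, buf, MY_BUF_SIZE);
 *	}
 *	ntb_transport_link_up(qp);
 *	(once the event handler reports link up, transmit:)
 *	ntb_transport_tx_enqueue(qp, my_buf, my_buf, my_len);
 */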

/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
	if (!qp)
		return;

	qp->client_ready = true;

	if (qp->transport->link_is_up)
		schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);

/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified.  It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	int val;

	if (!qp)
		return;

	qp->client_ready = false;

	val = ntb_spad_read(qp->ndev, QP_LINKS);

	ntb_peer_spad_write(qp->ndev, QP_LINKS,
			    val & ~BIT(qp->qp_num));

	if (qp->link_is_up)
		ntb_send_link_down(qp);
	else
		cancel_delayed_work_sync(&qp->link_work);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_down);

/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
	if (!qp)
		return false;

	return qp->link_is_up;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);

/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
	if (!qp)
		return 0;

	return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);

/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
	unsigned int max_size;
	unsigned int copy_align;
	struct dma_chan *rx_chan, *tx_chan;

	if (!qp)
		return 0;

	rx_chan = qp->rx_dma_chan;
	tx_chan = qp->tx_dma_chan;

	copy_align = max(rx_chan ? rx_chan->device->copy_align : 0,
			 tx_chan ? tx_chan->device->copy_align : 0);

	/* If DMA engine usage is possible, try to find the max size for that */
	max_size = qp->tx_max_frame - sizeof(struct ntb_payload_header);
	max_size = round_down(max_size, 1 << copy_align);

	return max_size;
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);

unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
{
	unsigned int head = qp->tx_index;
	unsigned int tail = qp->remote_rx_info->entry;

	return tail > head ? tail - head : qp->tx_max_entry + tail - head;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);

static void ntb_transport_doorbell_callback(void *data, int vector)
{
	struct ntb_transport_ctx *nt = data;
	struct ntb_transport_qp *qp;
	u64 db_bits;
	unsigned int qp_num;

	db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
		   ntb_db_vector_mask(nt->ndev, vector));

	while (db_bits) {
		qp_num = __ffs(db_bits);
		qp = &nt->qp_vec[qp_num];

		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);

		db_bits &= ~BIT_ULL(qp_num);
	}
}

static const struct ntb_ctx_ops ntb_transport_ops = {
	.link_event = ntb_transport_event_callback,
	.db_event = ntb_transport_doorbell_callback,
};

static struct ntb_client ntb_transport_client = {
	.ops = {
		.probe = ntb_transport_probe,
		.remove = ntb_transport_free,
	},
};

static int __init ntb_transport_init(void)
{
	int rc;

	pr_info("%s, version %s\n", NTB_TRANSPORT_DESC, NTB_TRANSPORT_VER);

	if (debugfs_initialized())
		nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	rc = bus_register(&ntb_transport_bus);
	if (rc)
		goto err_bus;

	rc = ntb_register_client(&ntb_transport_client);
	if (rc)
		goto err_client;

	return 0;

err_client:
	bus_unregister(&ntb_transport_bus);
err_bus:
	debugfs_remove_recursive(nt_debugfs_dir);
	return rc;
}
module_init(ntb_transport_init);

static void __exit ntb_transport_exit(void)
{
	ntb_unregister_client(&ntb_transport_client);
	bus_unregister(&ntb_transport_bus);
	debugfs_remove_recursive(nt_debugfs_dir);
}
module_exit(ntb_transport_exit);