/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Transport Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "linux/ntb.h"
#include "linux/ntb_transport.h"

#define NTB_TRANSPORT_VERSION	4
#define NTB_TRANSPORT_VER	"4"
#define NTB_TRANSPORT_NAME	"ntb_transport"
#define NTB_TRANSPORT_DESC	"Software Queue-Pair Transport over NTB"

MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
MODULE_VERSION(NTB_TRANSPORT_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");

static unsigned int transport_mtu = 0x10000;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");

static bool use_dma;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");

static struct dentry *nt_debugfs_dir;

struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;
	int retries;
	int errors;
	unsigned int tx_index;

	struct ntb_transport_qp *qp;
	union {
		struct ntb_payload_header __iomem *tx_hdr;
		struct ntb_payload_header *rx_hdr;
	};
	unsigned int index;
};

struct ntb_rx_info {
	unsigned int entry;
};

struct ntb_transport_qp {
	struct ntb_transport_ctx *transport;
	struct ntb_dev *ndev;
	void *cb_data;
	struct dma_chan *tx_dma_chan;
	struct dma_chan *rx_dma_chan;

	bool client_ready;
	bool link_is_up;
	bool active;

	u8 qp_num;	/* Only 64 QP's are allowed.  0-63 */
	u64 qp_bit;

	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void __iomem *tx_mw;
	dma_addr_t tx_mw_phys;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head rx_post_q;
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	/* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
	spinlock_t ntb_rx_q_lock;
	void *rx_buff;
	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;
	unsigned int rx_alloc_entry;
	dma_cookie_t last_cookie;
	struct tasklet_struct rxc_db_work;

	void (*event_handler)(void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 rx_memcpy;
	u64 rx_async;
	u64 dma_rx_prep_err;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
	u64 tx_err_no_buf;
	u64 tx_memcpy;
	u64 tx_async;
	u64 dma_tx_prep_err;
};

struct ntb_transport_mw {
	phys_addr_t phys_addr;
	resource_size_t phys_size;
	resource_size_t xlat_align;
	resource_size_t xlat_align_size;
	void __iomem *vbase;
	size_t xlat_size;
	size_t buff_size;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
	struct list_head entry;
	struct ntb_transport_ctx *nt;
	struct device dev;
};

struct ntb_transport_ctx {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_dev *ndev;

	struct ntb_transport_mw *mw_vec;
	struct ntb_transport_qp *qp_vec;
	unsigned int mw_count;
	unsigned int qp_count;
	u64 qp_bitmap;
	u64 qp_bitmap_free;

	bool link_is_up;
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_node_dir;
};

enum {
	DESC_DONE_FLAG = BIT(0),
	LINK_DOWN_FLAG = BIT(1),
};

struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};

enum {
	VERSION = 0,
	QP_LINKS,
	NUM_QPS,
	NUM_MWS,
	MW0_SZ_HIGH,
	MW0_SZ_LOW,
	MW1_SZ_HIGH,
	MW1_SZ_LOW,
	MAX_SPAD,
};

#define dev_client_dev(__dev) \
	container_of((__dev), struct ntb_transport_client_dev, dev)

#define drv_client(__drv) \
	container_of((__drv), struct ntb_transport_client, driver)

#define QP_TO_MW(nt, qp)	((qp) % nt->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10
#define DMA_RETRIES		20
#define DMA_OUT_RESOURCE_TO	50

static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client;
static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
			       struct ntb_queue_entry *entry);
static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset);

static int ntb_transport_bus_match(struct device *dev,
				   struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_transport_bus_probe(struct device *dev)
{
	const struct ntb_transport_client *client;
	int rc = -EINVAL;

	get_device(dev);

	client = drv_client(dev->driver);
	rc = client->probe(dev);
	if (rc)
		put_device(dev);

	return rc;
}

static int ntb_transport_bus_remove(struct device *dev)
{
	const struct ntb_transport_client *client;

	client = drv_client(dev->driver);
	client->remove(dev);

	put_device(dev);

	return 0;
}

static struct bus_type ntb_transport_bus = {
	.name = "ntb_transport",
	.match = ntb_transport_bus_match,
	.probe = ntb_transport_bus_probe,
	.remove = ntb_transport_bus_remove,
};

static LIST_HEAD(ntb_transport_list);

static int ntb_bus_init(struct ntb_transport_ctx *nt)
{
	list_add_tail(&nt->entry, &ntb_transport_list);
	return 0;
}

static void ntb_bus_remove(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);
}

static void ntb_transport_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;

	client_dev = dev_client_dev(dev);
	kfree(client_dev);
}

/**
 * ntb_transport_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_transport_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport_ctx *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);

/**
 * ntb_transport_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_transport_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport_ctx *nt;
	int node;
	int rc, i = 0;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	list_for_each_entry(nt, &ntb_transport_list, entry) {
		struct device *dev;

		node = dev_to_node(&nt->ndev->dev);

		client_dev = kzalloc_node(sizeof(*client_dev),
					  GFP_KERNEL, node);
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s%d", device_name, i);
		dev->bus = &ntb_transport_bus;
		dev->release = ntb_transport_client_release;
		dev->parent = &nt->ndev->dev;

		rc = device_register(dev);
		if (rc) {
			kfree(client_dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
		i++;
	}

	return 0;

err:
	ntb_transport_unregister_client_dev(device_name);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);

/**
 * ntb_transport_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_register_client(struct ntb_transport_client *drv)
{
	drv->driver.bus = &ntb_transport_bus;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client);

/**
 * ntb_transport_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
void ntb_transport_unregister_client(struct ntb_transport_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);

static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char *buf;
	ssize_t ret, out_offset, out_count;

	qp = filp->private_data;

	if (!qp || !qp->link_is_up)
		return 0;

	out_count = 1000;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\nNTB QP stats:\n\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_async - \t%llu\n", qp->rx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff - \t0x%p\n", qp->rx_buff);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_index - \t%u\n", qp->rx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_max_entry - \t%u\n", qp->rx_max_entry);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_alloc_entry - \t%u\n\n", qp->rx_alloc_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_async - \t%llu\n", qp->tx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n",
			       qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw - \t0x%p\n", qp->tx_mw);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_index (H) - \t%u\n", qp->tx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "RRI (T) - \t%u\n",
			       qp->remote_rx_info->entry);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_max_entry - \t%u\n", qp->tx_max_entry);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "free tx - \t%u\n",
			       ntb_transport_tx_free_entry(qp));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "DMA tx prep err - \t%llu\n",
			       qp->dma_tx_prep_err);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "DMA rx prep err - \t%llu\n",
			       qp->dma_rx_prep_err);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Using TX DMA - \t%s\n",
			       qp->tx_dma_chan ? "Yes" : "No");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Using RX DMA - \t%s\n",
			       qp->rx_dma_chan ? "Yes" : "No");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "QP Link - \t%s\n",
			       qp->link_is_up ? "Up" : "Down");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\n");

	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_read,
};

static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (list_empty(list)) {
		entry = NULL;
		goto out;
	}
	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_del(&entry->entry);

out:
	spin_unlock_irqrestore(lock, flags);

	return entry;
}

static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
					   struct list_head *list,
					   struct list_head *to_list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);

	if (list_empty(list)) {
		entry = NULL;
	} else {
		entry = list_first_entry(list, struct ntb_queue_entry, entry);
		list_move_tail(&entry->entry, to_list);
	}

	spin_unlock_irqrestore(lock, flags);

	return entry;
}

static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
				     unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
	struct ntb_transport_mw *mw;
	struct ntb_dev *ndev = nt->ndev;
	struct ntb_queue_entry *entry;
	unsigned int rx_size, num_qps_mw;
	unsigned int mw_num, mw_count, qp_count;
	unsigned int i;
	int node;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	if (!mw->virt_addr)
		return -ENOMEM;

	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
	qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
	rx_size -= sizeof(struct ntb_rx_info);

	qp->remote_rx_info = qp->rx_buff + rx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;

	/*
	 * Checking to see if we have more entries than the default.
	 * We should add additional entries if that is the case so we
	 * can be in sync with the transport frames.
	 */
	node = dev_to_node(&ndev->dev);
	for (i = qp->rx_alloc_entry; i < qp->rx_max_entry; i++) {
		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
		if (!entry)
			return -ENOMEM;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
			     &qp->rx_free_q);
		qp->rx_alloc_entry++;
	}

	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* setup the hdr offsets with 0's */
	for (i = 0; i < qp->rx_max_entry; i++) {
		void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
				sizeof(struct ntb_payload_header));
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
	qp->tx_index = 0;

	return 0;
}

static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;

	if (!mw->virt_addr)
		return;

	ntb_mw_clear_trans(nt->ndev, num_mw);
	dma_free_coherent(&pdev->dev, mw->buff_size,
			  mw->virt_addr, mw->dma_addr);
	mw->xlat_size = 0;
	mw->buff_size = 0;
	mw->virt_addr = NULL;
}

static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
		      resource_size_t size)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;
	size_t xlat_size, buff_size;
	int rc;

	if (!size)
		return -EINVAL;

	xlat_size = round_up(size, mw->xlat_align_size);
	buff_size = round_up(size, mw->xlat_align);

	/* No need to re-setup */
	if (mw->xlat_size == xlat_size)
		return 0;

	if (mw->buff_size)
		ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be aligned */
	mw->xlat_size = xlat_size;
	mw->buff_size = buff_size;

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size,
					   &mw->dma_addr, GFP_KERNEL);
	if (!mw->virt_addr) {
		mw->xlat_size = 0;
		mw->buff_size = 0;
		dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
			buff_size);
		return -ENOMEM;
	}

	/*
	 * we must ensure that the memory address allocated is BAR size
	 * aligned in order for the XLAT register to take the value. This
	 * is a requirement of the hardware. It is recommended to setup CMA
	 * for BAR sizes equal or greater than 4MB.
	 */
	if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
		dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
			&mw->dma_addr);
		ntb_free_mw(nt, num_mw);
		return -ENOMEM;
	}

	/* Notify HW the memory location of the receive buffer */
	rc = ntb_mw_set_trans(nt->ndev, num_mw, mw->dma_addr, mw->xlat_size);
	if (rc) {
		dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw);
		ntb_free_mw(nt, num_mw);
		return -EIO;
	}

	return 0;
}

static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
{
	qp->link_is_up = false;
	qp->active = false;

	qp->tx_index = 0;
	qp->rx_index = 0;
	qp->rx_bytes = 0;
	qp->rx_pkts = 0;
	qp->rx_ring_empty = 0;
	qp->rx_err_no_buf = 0;
	qp->rx_err_oflow = 0;
	qp->rx_err_ver = 0;
	qp->rx_memcpy = 0;
	qp->rx_async = 0;
	qp->tx_bytes = 0;
	qp->tx_pkts = 0;
	qp->tx_ring_full = 0;
	qp->tx_err_no_buf = 0;
	qp->tx_memcpy = 0;
	qp->tx_async = 0;
	qp->dma_tx_prep_err = 0;
	qp->dma_rx_prep_err = 0;
}

static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
	struct ntb_transport_ctx *nt = qp->transport;
	struct pci_dev *pdev = nt->ndev->pdev;

	dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);

	cancel_delayed_work_sync(&qp->link_work);
	ntb_qp_link_down_reset(qp);

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, qp->link_is_up);
}

static void ntb_qp_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_cleanup);
	struct ntb_transport_ctx *nt = qp->transport;

	ntb_qp_link_cleanup(qp);

	if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	schedule_work(&qp->link_cleanup);
}

static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_qp *qp;
	u64 qp_bitmap_alloc;
	int i;

	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->qp_count; i++)
		if (qp_bitmap_alloc & BIT_ULL(i)) {
			qp = &nt->qp_vec[i];
			ntb_qp_link_cleanup(qp);
			cancel_work_sync(&qp->link_cleanup);
			cancel_delayed_work_sync(&qp->link_work);
		}

	if (!nt->link_is_up)
		cancel_delayed_work_sync(&nt->link_work);

	/* The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < MAX_SPAD; i++)
		ntb_spad_write(nt->ndev, i, 0);
}

static void ntb_transport_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_cleanup);

	ntb_transport_link_cleanup(nt);
}

static void ntb_transport_event_callback(void *data)
{
	struct ntb_transport_ctx *nt = data;

	if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work, 0);
	else
		schedule_work(&nt->link_cleanup);
}

static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_work.work);
	struct ntb_dev *ndev = nt->ndev;
	struct pci_dev *pdev = ndev->pdev;
	resource_size_t size;
	u32 val;
	int rc = 0, i, spad;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < nt->mw_count; i++) {
		size = nt->mw_vec[i].phys_size;

		if (max_mw_size && size > max_mw_size)
			size = max_mw_size;

		spad = MW0_SZ_HIGH + (i * 2);
		ntb_peer_spad_write(ndev, spad, upper_32_bits(size));

		spad = MW0_SZ_LOW + (i * 2);
		ntb_peer_spad_write(ndev, spad, lower_32_bits(size));
	}

	ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count);

	ntb_peer_spad_write(ndev, NUM_QPS, nt->qp_count);

	ntb_peer_spad_write(ndev, VERSION, NTB_TRANSPORT_VERSION);

	/* Query the remote side for its info */
	val = ntb_spad_read(ndev, VERSION);
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);
	if (val != NTB_TRANSPORT_VERSION)
		goto out;

	val = ntb_spad_read(ndev, NUM_QPS);
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
	if (val != nt->qp_count)
		goto out;

	val = ntb_spad_read(ndev, NUM_MWS);
	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
	if (val != nt->mw_count)
		goto out;

	for (i = 0; i < nt->mw_count; i++) {
		u64 val64;

		val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
		val64 = (u64)val << 32;

		val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2));
		val64 |= val;

		dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);

		rc = ntb_set_mw(nt, i, val64);
		if (rc)
			goto out1;
	}

	nt->link_is_up = true;

	for (i = 0; i < nt->qp_count; i++) {
		struct ntb_transport_qp *qp = &nt->qp_vec[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready)
			schedule_delayed_work(&qp->link_work, 0);
	}

	return;

out1:
	for (i = 0; i < nt->mw_count; i++)
		ntb_free_mw(nt, i);

	/* if there's an actual failure, we should just bail */
	if (rc < 0) {
		ntb_link_disable(ndev);
		return;
	}

out:
	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_work.work);
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_transport_ctx *nt = qp->transport;
	int val;

	WARN_ON(!nt->link_is_up);

	val = ntb_spad_read(nt->ndev, QP_LINKS);

	ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num));

	/* query remote spad for qp ready bits */
	ntb_peer_spad_read(nt->ndev, QP_LINKS);
	dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (val & BIT(qp->qp_num)) {
		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		qp->link_is_up = true;
		qp->active = true;

		if (qp->event_handler)
			qp->event_handler(qp->cb_data, qp->link_is_up);

		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);
	} else if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
				    unsigned int qp_num)
{
	struct ntb_transport_qp *qp;
	phys_addr_t mw_base;
	resource_size_t mw_size;
	unsigned int num_qps_mw, tx_size;
	unsigned int mw_num, mw_count, qp_count;
	u64 qp_offset;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);

	qp = &nt->qp_vec[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ndev = nt->ndev;
	qp->client_ready = false;
	qp->event_handler = NULL;
	ntb_qp_link_down_reset(qp);

	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	mw_base = nt->mw_vec[mw_num].phys_addr;
	mw_size = nt->mw_vec[mw_num].phys_size;

	tx_size = (unsigned int)mw_size / num_qps_mw;
	qp_offset = tx_size * (qp_num / mw_count);

	qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
	if (!qp->tx_mw)
		return -EINVAL;

	qp->tx_mw_phys = mw_base + qp_offset;
	if (!qp->tx_mw_phys)
		return -EINVAL;

	tx_size -= sizeof(struct ntb_rx_info);
	qp->rx_info = qp->tx_mw + tx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;

	if (nt->debugfs_node_dir) {
		/* "qp" plus a qp number up to 63 plus the NUL terminator */
		char debugfs_name[8];

		snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
						     nt->debugfs_node_dir);

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
							qp->debugfs_dir, qp,
							&ntb_qp_debugfs_stats);
	} else {
		qp->debugfs_dir = NULL;
		qp->debugfs_stats = NULL;
	}

	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);

	spin_lock_init(&qp->ntb_rx_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_post_q);
	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);

	tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
		     (unsigned long)qp);

	return 0;
}

static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
{
	struct ntb_transport_ctx *nt;
	struct ntb_transport_mw *mw;
	unsigned int mw_count, qp_count;
	u64 qp_bitmap;
	int node;
	int rc, i;

	mw_count = ntb_mw_count(ndev);
	if (ntb_spad_count(ndev) < (NUM_MWS + 1 + mw_count * 2)) {
		dev_err(&ndev->dev, "Not enough scratch pad registers for %s",
			NTB_TRANSPORT_NAME);
		return -EIO;
	}

	if (ntb_db_is_unsafe(ndev))
		dev_dbg(&ndev->dev,
			"doorbell is unsafe, proceed anyway...\n");
	if (ntb_spad_is_unsafe(ndev))
		dev_dbg(&ndev->dev,
			"scratchpad is unsafe, proceed anyway...\n");

	node = dev_to_node(&ndev->dev);

	nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
	if (!nt)
		return -ENOMEM;

	nt->ndev = ndev;

	nt->mw_count = mw_count;

	nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
				  GFP_KERNEL, node);
	if (!nt->mw_vec) {
		rc = -ENOMEM;
		goto err;
	}

	for (i = 0; i < mw_count; i++) {
		mw = &nt->mw_vec[i];

		rc = ntb_mw_get_range(ndev, i, &mw->phys_addr, &mw->phys_size,
				      &mw->xlat_align, &mw->xlat_align_size);
		if (rc)
			goto err1;

		mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
		if (!mw->vbase) {
			rc = -ENOMEM;
			goto err1;
		}

		mw->buff_size = 0;
		mw->xlat_size = 0;
		mw->virt_addr = NULL;
		mw->dma_addr = 0;
	}

	qp_bitmap = ntb_db_valid_mask(ndev);

	qp_count = ilog2(qp_bitmap);
	if (max_num_clients && max_num_clients < qp_count)
		qp_count = max_num_clients;
	else if (mw_count < qp_count)
		qp_count = mw_count;

	qp_bitmap &= BIT_ULL(qp_count) - 1;

	nt->qp_count = qp_count;
	nt->qp_bitmap = qp_bitmap;
	nt->qp_bitmap_free = qp_bitmap;

	nt->qp_vec = kzalloc_node(qp_count * sizeof(*nt->qp_vec),
				  GFP_KERNEL, node);
	if (!nt->qp_vec) {
		rc = -ENOMEM;
		goto err1;
	}

	if (nt_debugfs_dir) {
		nt->debugfs_node_dir =
			debugfs_create_dir(pci_name(ndev->pdev),
					   nt_debugfs_dir);
	}

	for (i = 0; i < qp_count; i++) {
		rc = ntb_transport_init_queue(nt, i);
		if (rc)
			goto err2;
	}

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);

	rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
	if (rc)
		goto err2;

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);
	if (rc)
		goto err3;

	nt->link_is_up = false;
	ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ndev);

	return 0;

err3:
	ntb_clear_ctx(ndev);
err2:
	kfree(nt->qp_vec);
err1:
	while (i--) {
		mw = &nt->mw_vec[i];
		iounmap(mw->vbase);
	}
	kfree(nt->mw_vec);
err:
	kfree(nt);
	return rc;
}

static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
{
	struct ntb_transport_ctx *nt = ndev->ctx;
	struct ntb_transport_qp *qp;
	u64 qp_bitmap_alloc;
	int i;

	ntb_transport_link_cleanup(nt);
	cancel_work_sync(&nt->link_cleanup);
	cancel_delayed_work_sync(&nt->link_work);

	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

	/* verify that all the qp's are freed */
	for (i = 0; i < nt->qp_count; i++) {
		qp = &nt->qp_vec[i];
		if (qp_bitmap_alloc & BIT_ULL(i))
			ntb_transport_free_queue(qp);
		debugfs_remove_recursive(qp->debugfs_dir);
	}

	ntb_link_disable(ndev);
	ntb_clear_ctx(ndev);

	ntb_bus_remove(nt);

	for (i = nt->mw_count; i--; ) {
		ntb_free_mw(nt, i);
		iounmap(nt->mw_vec[i].vbase);
	}

	kfree(nt->qp_vec);
	kfree(nt->mw_vec);
	kfree(nt);
}

static void ntb_complete_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_queue_entry *entry;
	void *cb_data;
	unsigned int len;
	unsigned long irqflags;

	spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);

	while (!list_empty(&qp->rx_post_q)) {
		entry = list_first_entry(&qp->rx_post_q,
					 struct ntb_queue_entry, entry);
		if (!(entry->flags & DESC_DONE_FLAG))
			break;

		entry->rx_hdr->flags = 0;
		iowrite32(entry->index, &qp->rx_info->entry);

		cb_data = entry->cb_data;
		len = entry->len;

		list_move_tail(&entry->entry, &qp->rx_free_q);

		spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);

		if (qp->rx_handler && qp->client_ready)
			qp->rx_handler(qp, qp->cb_data, cb_data, len);

		spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
	}

	spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
}

static void ntb_rx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;

	entry->flags |= DESC_DONE_FLAG;

	ntb_complete_rxc(entry->qp);
}

static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
{
	void *buf = entry->buf;
	size_t len = entry->len;

	memcpy(buf, offset, len);

	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();

	ntb_rx_copy_callback(entry);
}

static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
{
	struct dma_async_tx_descriptor *txd;
	struct ntb_transport_qp *qp = entry->qp;
	struct dma_chan *chan = qp->rx_dma_chan;
	struct dma_device *device;
	size_t pay_off, buff_off, len;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	void *buf = entry->buf;
	int retries = 0;

	len = entry->len;

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err;

	device = chan->device;
	pay_off = (size_t)offset & ~PAGE_MASK;
	buff_off = (size_t)buf & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
		goto err;

	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
	if (!unmap)
		goto err;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
				      pay_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[1]))
		goto err_get_unmap;

	unmap->from_cnt = 1;

	for (retries = 0; retries < DMA_RETRIES; retries++) {
		txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
						     unmap->addr[0], len,
						     DMA_PREP_INTERRUPT);
		if (txd)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(DMA_OUT_RESOURCE_TO);
	}

	if (!txd) {
		qp->dma_rx_prep_err++;
		goto err_get_unmap;
	}

	txd->callback = ntb_rx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	qp->last_cookie = cookie;

	qp->rx_async++;

	return;

err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err:
	ntb_memcpy_rx(entry, offset);
	qp->rx_memcpy++;
}

static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

	dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
		qp->qp_num, hdr->ver, hdr->len, hdr->flags);

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
		qp->rx_ring_empty++;
		return -EAGAIN;
	}

	if (hdr->flags & LINK_DOWN_FLAG) {
		dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
		ntb_qp_link_down(qp);
		hdr->flags = 0;
		return -EAGAIN;
	}

	if (hdr->ver != (u32)qp->rx_pkts) {
		dev_dbg(&qp->ndev->pdev->dev,
			"version mismatch, expected %llu - got %u\n",
			qp->rx_pkts, hdr->ver);
		qp->rx_err_ver++;
		return -EIO;
	}

	entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
	if (!entry) {
		dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
		qp->rx_err_no_buf++;
		return -EAGAIN;
	}

	entry->rx_hdr = hdr;
	entry->index = qp->rx_index;

	if (hdr->len > entry->len) {
		dev_dbg(&qp->ndev->pdev->dev,
			"receive buffer overflow! Wanted %d got %d\n",
			hdr->len, entry->len);
		qp->rx_err_oflow++;

		entry->len = -EIO;
		entry->flags |= DESC_DONE_FLAG;

		ntb_complete_rxc(qp);
	} else {
		dev_dbg(&qp->ndev->pdev->dev,
			"RX OK index %u ver %u size %d into buf size %d\n",
			qp->rx_index, hdr->ver, hdr->len, entry->len);

		qp->rx_bytes += hdr->len;
		qp->rx_pkts++;

		entry->len = hdr->len;

		ntb_async_rx(entry, offset);
	}

	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;

	return 0;
}

static void ntb_transport_rxc_db(unsigned long data)
{
	struct ntb_transport_qp *qp = (void *)data;
	int rc, i;

	dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
		__func__, qp->qp_num);

	/* Limit the number of packets processed in a single interrupt to
	 * provide fairness to others
	 */
	for (i = 0; i < qp->rx_max_entry; i++) {
		rc = ntb_process_rxc(qp);
		if (rc)
			break;
	}

	if (i && qp->rx_dma_chan)
		dma_async_issue_pending(qp->rx_dma_chan);

	if (i == qp->rx_max_entry) {
		/* there is more work to do */
		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);
	} else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
		/* the doorbell bit is set: clear it */
		ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
		/* ntb_db_read ensures ntb_db_clear write is committed */
		ntb_db_read(qp->ndev);

		/* an interrupt may have arrived between finishing
		 * ntb_process_rxc and clearing the doorbell bit:
		 * there might be some more work to do.
		 */
		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);
	}
}

static void ntb_tx_copy_callback(void *data,
				 const struct dmaengine_result *res)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;

	/* we need to check DMA results if we are using DMA */
	if (res) {
		enum dmaengine_tx_result dma_err = res->result;

		switch (dma_err) {
		case DMA_TRANS_READ_FAILED:
		case DMA_TRANS_WRITE_FAILED:
			entry->errors++;
			/* fall through: resubmit the failed buffer via CPU */
		case DMA_TRANS_ABORTED:
		{
			void __iomem *offset =
				qp->tx_mw + qp->tx_max_frame *
				entry->tx_index;

			/* resubmit via CPU */
			ntb_memcpy_tx(entry, offset);
			qp->tx_memcpy++;
			return;
		}

		case DMA_TRANS_NOERROR:
		default:
			break;
		}
	}

	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

	ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
				       entry->len);
	}

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}

static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
#ifdef ARCH_HAS_NOCACHE_UACCESS
	/*
	 * Using non-temporal mov to improve performance on non-cached
	 * writes, even though we aren't actually copying from user space.
	 */
	__copy_from_user_inatomic_nocache(offset, entry->buf, entry->len);
#else
	memcpy_toio(offset, entry->buf, entry->len);
#endif

	/* Ensure that the data is fully copied out before setting the flags */
	wmb();

	ntb_tx_copy_callback(entry, NULL);
}

static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
			       struct ntb_queue_entry *entry)
{
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = qp->tx_dma_chan;
	struct dma_device *device;
	size_t len = entry->len;
	void *buf = entry->buf;
	size_t dest_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_addr_t dest;
	dma_cookie_t cookie;
	int retries = 0;

	device = chan->device;
	dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
	buff_off = (size_t)buf & ~PAGE_MASK;
	dest_off = (size_t)dest & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
		goto err;

	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
	if (!unmap)
		goto err;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	for (retries = 0; retries < DMA_RETRIES; retries++) {
		txd = device->device_prep_dma_memcpy(chan, dest,
						     unmap->addr[0], len,
						     DMA_PREP_INTERRUPT);
		if (txd)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(DMA_OUT_RESOURCE_TO);
	}

	if (!txd) {
		qp->dma_tx_prep_err++;
		goto err_get_unmap;
	}

	txd->callback_result = ntb_tx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	dma_async_issue_pending(chan);

	return 0;
err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err:
	return -ENXIO;
}

static void ntb_async_tx(struct ntb_transport_qp *qp,
			 struct ntb_queue_entry *entry)
{
	struct ntb_payload_header __iomem *hdr;
	struct dma_chan *chan = qp->tx_dma_chan;
	void __iomem *offset;
	int res;

	entry->tx_index = qp->tx_index;
	offset = qp->tx_mw + qp->tx_max_frame * entry->tx_index;
	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
	entry->tx_hdr = hdr;

	iowrite32(entry->len, &hdr->len);
	iowrite32((u32)qp->tx_pkts, &hdr->ver);

	if (!chan)
		goto err;

	if (entry->len < copy_bytes)
		goto err;

	res = ntb_async_tx_submit(qp, entry);
	if (res < 0)
		goto err;

	if (!entry->retries)
		qp->tx_async++;

	return;

err:
	ntb_memcpy_tx(entry, offset);
	qp->tx_memcpy++;
}

static int ntb_process_tx(struct ntb_transport_qp *qp,
			  struct ntb_queue_entry *entry)
{
	if (qp->tx_index == qp->remote_rx_info->entry) {
		qp->tx_ring_full++;
		return -EAGAIN;
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
		return 0;
	}

	ntb_async_tx(qp, entry);

	qp->tx_index++;
	qp->tx_index %= qp->tx_max_entry;

	qp->tx_pkts++;

	return 0;
}

static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_queue_entry *entry;
	int i, rc;

	if (!qp->link_is_up)
		return;

	dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num);

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry)
			break;
		msleep(100);
	}

	if (!entry)
		return;

	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = LINK_DOWN_FLAG;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
			qp->qp_num);

	ntb_qp_link_down_reset(qp);
}

static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
{
	return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
}

/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @data: pointer for callback data passed back to the client handlers
 * @client_dev: &struct device of the NTB transport client device
 * @handlers: pointer to the receive, transmit and event callback handlers
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive.  The receive callback routine will be
 * used to pass up data when the transport has received it on the queue.  The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
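 *
 * A minimal usage sketch (hypothetical client code; my_ctx, my_rx, my_tx and
 * my_event are illustrative names, not part of this driver):
 *
 *	static const struct ntb_queue_handlers my_handlers = {
 *		.rx_handler = my_rx,
 *		.tx_handler = my_tx,
 *		.event_handler = my_event,
 *	};
 *
 *	qp = ntb_transport_create_queue(my_ctx, client_dev, &my_handlers);
 *	if (!qp)
 *		return -EIO;
 *	ntb_transport_link_up(qp);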
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct device *client_dev,
			   const struct ntb_queue_handlers *handlers)
{
	struct ntb_dev *ndev;
	struct pci_dev *pdev;
	struct ntb_transport_ctx *nt;
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;
	u64 qp_bit;
	unsigned int free_queue;
	dma_cap_mask_t dma_mask;
	int node;
	int i;

	ndev = dev_ntb(client_dev->parent);
	pdev = ndev->pdev;
	nt = ndev->ctx;

	node = dev_to_node(&ndev->dev);

	free_queue = ffs(nt->qp_bitmap);
	if (!free_queue)
		goto err;

	/* decrement free_queue to make it zero based */
	free_queue--;

	qp = &nt->qp_vec[free_queue];
	qp_bit = BIT_ULL(qp->qp_num);

	nt->qp_bitmap_free &= ~qp_bit;

	qp->cb_data = data;
	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	dma_cap_zero(dma_mask);
	dma_cap_set(DMA_MEMCPY, dma_mask);

	if (use_dma) {
		qp->tx_dma_chan =
			dma_request_channel(dma_mask, ntb_dma_filter_fn,
					    (void *)(unsigned long)node);
		if (!qp->tx_dma_chan)
			dev_info(&pdev->dev, "Unable to allocate TX DMA channel\n");

		qp->rx_dma_chan =
			dma_request_channel(dma_mask, ntb_dma_filter_fn,
					    (void *)(unsigned long)node);
		if (!qp->rx_dma_chan)
			dev_info(&pdev->dev, "Unable to allocate RX DMA channel\n");
	} else {
		qp->tx_dma_chan = NULL;
		qp->rx_dma_chan = NULL;
	}

	dev_dbg(&pdev->dev, "Using %s memcpy for TX\n",
		qp->tx_dma_chan ? "DMA" : "CPU");

	dev_dbg(&pdev->dev, "Using %s memcpy for RX\n",
		qp->rx_dma_chan ? "DMA" : "CPU");

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
		if (!entry)
			goto err1;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
			     &qp->rx_free_q);
	}
	qp->rx_alloc_entry = NTB_QP_DEF_NUM_ENTRIES;

	for (i = 0; i < qp->tx_max_entry; i++) {
		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
		if (!entry)
			goto err2;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
	}

	ntb_db_clear(qp->ndev, qp_bit);
	ntb_db_clear_mask(qp->ndev, qp_bit);

	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);

	return qp;

err2:
	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);
err1:
	qp->rx_alloc_entry = 0;
	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
		kfree(entry);
	if (qp->tx_dma_chan)
		dma_release_channel(qp->tx_dma_chan);
	if (qp->rx_dma_chan)
		dma_release_channel(qp->rx_dma_chan);
	nt->qp_bitmap_free |= qp_bit;
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(ntb_transport_create_queue);

/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees NTB transport queue
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	struct ntb_queue_entry *entry;
	u64 qp_bit;

	if (!qp)
		return;

	pdev = qp->ndev->pdev;

	qp->active = false;

	if (qp->tx_dma_chan) {
		struct dma_chan *chan = qp->tx_dma_chan;
		/* Putting the dma_chan to NULL will force any new traffic to
		 * be processed by the CPU instead of the DMA engine
		 */
		qp->tx_dma_chan = NULL;

		/* Try to be nice and wait for any queued DMA engine
		 * transactions to process before smashing it with a rock
		 */
		dma_sync_wait(chan, qp->last_cookie);
		dmaengine_terminate_all(chan);
		dma_release_channel(chan);
	}

	if (qp->rx_dma_chan) {
		struct dma_chan *chan = qp->rx_dma_chan;
		/* Putting the dma_chan to NULL will force any new traffic to
		 * be processed by the CPU instead of the DMA engine
		 */
		qp->rx_dma_chan = NULL;

		/* Try to be nice and wait for any queued DMA engine
		 * transactions to process before smashing it with a rock
		 */
		dma_sync_wait(chan, qp->last_cookie);
		dmaengine_terminate_all(chan);
		dma_release_channel(chan);
	}

	qp_bit = BIT_ULL(qp->qp_num);

	ntb_db_set_mask(qp->ndev, qp_bit);
	tasklet_kill(&qp->rxc_db_work);

	cancel_delayed_work_sync(&qp->link_work);

	qp->cb_data = NULL;
	qp->rx_handler = NULL;
	qp->tx_handler = NULL;
	qp->event_handler = NULL;

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
		kfree(entry);

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
		dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
		dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);

	qp->transport->qp_bitmap_free |= qp_bit;

	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);

/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB queue to be freed
 * @len: pointer to variable to write enqueued buffers length
 *
 * Dequeues unused buffers from receive queue.  Should only be used during
 * shutdown of qp.
 *
 * RETURNS: NULL error value on error, or void* for success.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
	struct ntb_queue_entry *entry;
	void *buf;

	if (!qp || qp->client_ready)
		return NULL;

	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
	if (!entry)
		return NULL;

	buf = entry->cb_data;
	*len = entry->len;

	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);

	return buf;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);

/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue, into which an NTB
 * payload can be received.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
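 *
 * A minimal usage sketch (hypothetical client code; rx_ctx, rx_buf and
 * rx_buf_len are illustrative assumptions):
 *
 *	rc = ntb_transport_rx_enqueue(qp, rx_ctx, rx_buf, rx_buf_len);
 *	if (rc)
 *		return rc;
 *
 * The buffer is handed back to the client through the queue's rx_handler
 * callback once a payload has been copied into it.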
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;

	if (!qp)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
	if (!entry)
		return -ENOMEM;

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);

	if (qp->active)
		tasklet_schedule(&qp->rxc_db_work);

	return 0;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);

/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	if (!qp || !qp->link_is_up || !len)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (!entry) {
		qp->tx_err_no_buf++;
		return -EBUSY;
	}

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;
	entry->errors = 0;
	entry->retries = 0;
	entry->tx_index = 0;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);

/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
	if (!qp)
		return;

	qp->client_ready = true;

	if (qp->transport->link_is_up)
		schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);

/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified.  It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	int val;

	if (!qp)
		return;

	qp->client_ready = false;

	val = ntb_spad_read(qp->ndev, QP_LINKS);

	ntb_peer_spad_write(qp->ndev, QP_LINKS,
			    val & ~BIT(qp->qp_num));

	if (qp->link_is_up)
		ntb_send_link_down(qp);
	else
		cancel_delayed_work_sync(&qp->link_work);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_down);

/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
	if (!qp)
		return false;

	return qp->link_is_up;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);

/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
	if (!qp)
		return 0;

	return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);

/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
	unsigned int max_size;
	unsigned int copy_align;
	struct dma_chan *rx_chan, *tx_chan;

	if (!qp)
		return 0;

	rx_chan = qp->rx_dma_chan;
	tx_chan = qp->tx_dma_chan;

	copy_align = max(rx_chan ? rx_chan->device->copy_align : 0,
			 tx_chan ? tx_chan->device->copy_align : 0);

	/* If DMA engine usage is possible, try to find the max size for that */
	max_size = qp->tx_max_frame - sizeof(struct ntb_payload_header);
	max_size = round_down(max_size, 1 << copy_align);

	return max_size;
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);

unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
{
	unsigned int head = qp->tx_index;
	unsigned int tail = qp->remote_rx_info->entry;

	return tail > head ? tail - head : qp->tx_max_entry + tail - head;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);

static void ntb_transport_doorbell_callback(void *data, int vector)
{
	struct ntb_transport_ctx *nt = data;
	struct ntb_transport_qp *qp;
	u64 db_bits;
	unsigned int qp_num;

	db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
		   ntb_db_vector_mask(nt->ndev, vector));

	while (db_bits) {
		qp_num = __ffs(db_bits);
		qp = &nt->qp_vec[qp_num];

		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);

		db_bits &= ~BIT_ULL(qp_num);
	}
}

static const struct ntb_ctx_ops ntb_transport_ops = {
	.link_event = ntb_transport_event_callback,
	.db_event = ntb_transport_doorbell_callback,
};

static struct ntb_client ntb_transport_client = {
	.ops = {
		.probe = ntb_transport_probe,
		.remove = ntb_transport_free,
	},
};

static int __init ntb_transport_init(void)
{
	int rc;

	pr_info("%s, version %s\n", NTB_TRANSPORT_DESC, NTB_TRANSPORT_VER);

	if (debugfs_initialized())
		nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	rc = bus_register(&ntb_transport_bus);
	if (rc)
		goto err_bus;

	rc = ntb_register_client(&ntb_transport_client);
	if (rc)
		goto err_client;

	return 0;

err_client:
	bus_unregister(&ntb_transport_bus);
err_bus:
	debugfs_remove_recursive(nt_debugfs_dir);
	return rc;
}
module_init(ntb_transport_init);

static void __exit ntb_transport_exit(void)
{
	debugfs_remove_recursive(nt_debugfs_dir);

	ntb_unregister_client(&ntb_transport_client);
	bus_unregister(&ntb_transport_bus);
}
module_exit(ntb_transport_exit);