1 /* 2 * This file is provided under a dual BSD/GPLv2 license. When using or 3 * redistributing this file, you may do so under either license. 4 * 5 * GPL LICENSE SUMMARY 6 * 7 * Copyright(c) 2012 Intel Corporation. All rights reserved. 8 * Copyright (C) 2015 EMC Corporation. All Rights Reserved. 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of version 2 of the GNU General Public License as 12 * published by the Free Software Foundation. 13 * 14 * BSD LICENSE 15 * 16 * Copyright(c) 2012 Intel Corporation. All rights reserved. 17 * Copyright (C) 2015 EMC Corporation. All Rights Reserved. 18 * 19 * Redistribution and use in source and binary forms, with or without 20 * modification, are permitted provided that the following conditions 21 * are met: 22 * 23 * * Redistributions of source code must retain the above copyright 24 * notice, this list of conditions and the following disclaimer. 25 * * Redistributions in binary form must reproduce the above copy 26 * notice, this list of conditions and the following disclaimer in 27 * the documentation and/or other materials provided with the 28 * distribution. 29 * * Neither the name of Intel Corporation nor the names of its 30 * contributors may be used to endorse or promote products derived 31 * from this software without specific prior written permission. 32 * 33 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 34 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 35 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 36 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 37 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 38 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 39 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 40 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 41 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 42 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 43 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 44 * 45 * PCIe NTB Transport Linux driver 46 * 47 * Contact Information: 48 * Jon Mason <jon.mason@intel.com> 49 */ 50 #include <linux/debugfs.h> 51 #include <linux/delay.h> 52 #include <linux/dmaengine.h> 53 #include <linux/dma-mapping.h> 54 #include <linux/errno.h> 55 #include <linux/export.h> 56 #include <linux/interrupt.h> 57 #include <linux/module.h> 58 #include <linux/pci.h> 59 #include <linux/slab.h> 60 #include <linux/types.h> 61 #include <linux/uaccess.h> 62 #include "linux/ntb.h" 63 #include "linux/ntb_transport.h" 64 65 #define NTB_TRANSPORT_VERSION 4 66 #define NTB_TRANSPORT_VER "4" 67 #define NTB_TRANSPORT_NAME "ntb_transport" 68 #define NTB_TRANSPORT_DESC "Software Queue-Pair Transport over NTB" 69 #define NTB_TRANSPORT_MIN_SPADS (MW0_SZ_HIGH + 2) 70 71 MODULE_DESCRIPTION(NTB_TRANSPORT_DESC); 72 MODULE_VERSION(NTB_TRANSPORT_VER); 73 MODULE_LICENSE("Dual BSD/GPL"); 74 MODULE_AUTHOR("Intel Corporation"); 75 76 static unsigned long max_mw_size; 77 module_param(max_mw_size, ulong, 0644); 78 MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows"); 79 80 static unsigned int transport_mtu = 0x10000; 81 module_param(transport_mtu, uint, 0644); 82 MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets"); 83 84 static unsigned char max_num_clients; 85 module_param(max_num_clients, byte, 0644); 86 MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients"); 87 88 static unsigned int copy_bytes = 1024; 89 module_param(copy_bytes, uint, 0644); 90 MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA"); 91 92 static bool use_dma; 93 module_param(use_dma, bool, 0644); 94 MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy"); 95 96 static bool use_msi; 97 #ifdef CONFIG_NTB_MSI 98 module_param(use_msi, bool, 0644); 99 MODULE_PARM_DESC(use_msi, "Use MSI interrupts instead of doorbells"); 100 #endif 101 102 static struct dentry *nt_debugfs_dir; 103 104 /* Only two-ports NTB devices are supported */ 105 #define PIDX NTB_DEF_PEER_IDX 106 107 struct ntb_queue_entry { 108 /* ntb_queue list reference */ 109 struct list_head entry; 110 /* pointers to data to be transferred */ 111 void *cb_data; 112 void *buf; 113 unsigned int len; 114 unsigned int flags; 115 int retries; 116 int errors; 117 unsigned int tx_index; 118 unsigned int rx_index; 119 120 struct ntb_transport_qp *qp; 121 union { 122 struct ntb_payload_header __iomem *tx_hdr; 123 struct ntb_payload_header *rx_hdr; 124 }; 125 }; 126 127 struct ntb_rx_info { 128 unsigned int entry; 129 }; 130 131 struct ntb_transport_qp { 132 struct ntb_transport_ctx *transport; 133 struct ntb_dev *ndev; 134 void *cb_data; 135 struct dma_chan *tx_dma_chan; 136 struct dma_chan *rx_dma_chan; 137 138 bool client_ready; 139 bool link_is_up; 140 bool active; 141 142 u8 qp_num; /* Only 64 QP's are allowed. 0-63 */ 143 u64 qp_bit; 144 145 struct ntb_rx_info __iomem *rx_info; 146 struct ntb_rx_info *remote_rx_info; 147 148 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data, 149 void *data, int len); 150 struct list_head tx_free_q; 151 spinlock_t ntb_tx_free_q_lock; 152 void __iomem *tx_mw; 153 phys_addr_t tx_mw_phys; 154 size_t tx_mw_size; 155 dma_addr_t tx_mw_dma_addr; 156 unsigned int tx_index; 157 unsigned int tx_max_entry; 158 unsigned int tx_max_frame; 159 160 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, 161 void *data, int len); 162 struct list_head rx_post_q; 163 struct list_head rx_pend_q; 164 struct list_head rx_free_q; 165 /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */ 166 spinlock_t ntb_rx_q_lock; 167 void *rx_buff; 168 unsigned int rx_index; 169 unsigned int rx_max_entry; 170 unsigned int rx_max_frame; 171 unsigned int rx_alloc_entry; 172 dma_cookie_t last_cookie; 173 struct tasklet_struct rxc_db_work; 174 175 void (*event_handler)(void *data, int status); 176 struct delayed_work link_work; 177 struct work_struct link_cleanup; 178 179 struct dentry *debugfs_dir; 180 struct dentry *debugfs_stats; 181 182 /* Stats */ 183 u64 rx_bytes; 184 u64 rx_pkts; 185 u64 rx_ring_empty; 186 u64 rx_err_no_buf; 187 u64 rx_err_oflow; 188 u64 rx_err_ver; 189 u64 rx_memcpy; 190 u64 rx_async; 191 u64 tx_bytes; 192 u64 tx_pkts; 193 u64 tx_ring_full; 194 u64 tx_err_no_buf; 195 u64 tx_memcpy; 196 u64 tx_async; 197 198 bool use_msi; 199 int msi_irq; 200 struct ntb_msi_desc msi_desc; 201 struct ntb_msi_desc peer_msi_desc; 202 }; 203 204 struct ntb_transport_mw { 205 phys_addr_t phys_addr; 206 resource_size_t phys_size; 207 void __iomem *vbase; 208 size_t xlat_size; 209 size_t buff_size; 210 size_t alloc_size; 211 void *alloc_addr; 212 void *virt_addr; 213 dma_addr_t dma_addr; 214 }; 215 216 struct ntb_transport_client_dev { 217 struct list_head entry; 218 struct ntb_transport_ctx *nt; 219 struct device dev; 220 }; 221 222 struct ntb_transport_ctx { 223 struct list_head entry; 224 struct list_head client_devs; 225 226 struct ntb_dev *ndev; 227 228 struct ntb_transport_mw *mw_vec; 229 struct ntb_transport_qp *qp_vec; 230 unsigned int mw_count; 231 unsigned int qp_count; 232 u64 qp_bitmap; 233 u64 qp_bitmap_free; 234 235 bool use_msi; 236 unsigned int msi_spad_offset; 237 u64 msi_db_mask; 238 239 bool link_is_up; 240 struct delayed_work link_work; 241 struct work_struct link_cleanup; 242 243 struct dentry *debugfs_node_dir; 244 }; 245 246 enum { 247 DESC_DONE_FLAG = BIT(0), 248 LINK_DOWN_FLAG = BIT(1), 249 }; 250 251 struct ntb_payload_header { 252 unsigned int ver; 253 unsigned int len; 254 unsigned int flags; 255 }; 256 257 enum { 258 VERSION = 0, 259 QP_LINKS, 260 NUM_QPS, 261 NUM_MWS, 262 MW0_SZ_HIGH, 263 MW0_SZ_LOW, 264 }; 265 266 #define dev_client_dev(__dev) \ 267 container_of((__dev), struct ntb_transport_client_dev, dev) 268 269 #define drv_client(__drv) \ 270 container_of((__drv), struct ntb_transport_client, driver) 271 272 #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) 273 #define NTB_QP_DEF_NUM_ENTRIES 100 274 #define NTB_LINK_DOWN_TIMEOUT 10 275 276 static void ntb_transport_rxc_db(unsigned long data); 277 static const struct ntb_ctx_ops ntb_transport_ops; 278 static struct ntb_client ntb_transport_client; 279 static int ntb_async_tx_submit(struct ntb_transport_qp *qp, 280 struct ntb_queue_entry *entry); 281 static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset); 282 static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset); 283 static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset); 284 285 286 static int ntb_transport_bus_match(struct device *dev, 287 struct device_driver *drv) 288 { 289 return !strncmp(dev_name(dev), drv->name, strlen(drv->name)); 290 } 291 292 static int ntb_transport_bus_probe(struct device *dev) 293 { 294 const struct ntb_transport_client *client; 295 int rc; 296 297 get_device(dev); 298 299 client = drv_client(dev->driver); 300 rc = client->probe(dev); 301 if (rc) 302 put_device(dev); 303 304 return rc; 305 } 306 307 static int ntb_transport_bus_remove(struct device *dev) 308 { 309 const struct ntb_transport_client *client; 310 311 client = drv_client(dev->driver); 312 client->remove(dev); 313 314 put_device(dev); 315 316 return 0; 317 } 318 319 static struct bus_type ntb_transport_bus = { 320 .name = "ntb_transport", 321 .match = ntb_transport_bus_match, 322 .probe = ntb_transport_bus_probe, 323 .remove = ntb_transport_bus_remove, 324 }; 325 326 static LIST_HEAD(ntb_transport_list); 327 328 static int ntb_bus_init(struct ntb_transport_ctx *nt) 329 { 330 list_add_tail(&nt->entry, &ntb_transport_list); 331 return 0; 332 } 333 334 static void ntb_bus_remove(struct ntb_transport_ctx *nt) 335 { 336 struct ntb_transport_client_dev *client_dev, *cd; 337 338 list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) { 339 dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n", 340 dev_name(&client_dev->dev)); 341 list_del(&client_dev->entry); 342 device_unregister(&client_dev->dev); 343 } 344 345 list_del(&nt->entry); 346 } 347 348 static void ntb_transport_client_release(struct device *dev) 349 { 350 struct ntb_transport_client_dev *client_dev; 351 352 client_dev = dev_client_dev(dev); 353 kfree(client_dev); 354 } 355 356 /** 357 * ntb_transport_unregister_client_dev - Unregister NTB client device 358 * @device_name: Name of NTB client device 359 * 360 * Unregister an NTB client device with the NTB transport layer 361 */ 362 void ntb_transport_unregister_client_dev(char *device_name) 363 { 364 struct ntb_transport_client_dev *client, *cd; 365 struct ntb_transport_ctx *nt; 366 367 list_for_each_entry(nt, &ntb_transport_list, entry) 368 list_for_each_entry_safe(client, cd, &nt->client_devs, entry) 369 if (!strncmp(dev_name(&client->dev), device_name, 370 strlen(device_name))) { 371 list_del(&client->entry); 372 device_unregister(&client->dev); 373 } 374 } 375 EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev); 376 377 /** 378 * ntb_transport_register_client_dev - Register NTB client device 379 * @device_name: Name of NTB client device 380 * 381 * Register an NTB client device with the NTB transport layer 382 */ 383 int ntb_transport_register_client_dev(char *device_name) 384 { 385 struct ntb_transport_client_dev *client_dev; 386 struct ntb_transport_ctx *nt; 387 int node; 388 int rc, i = 0; 389 390 if (list_empty(&ntb_transport_list)) 391 return -ENODEV; 392 393 list_for_each_entry(nt, &ntb_transport_list, entry) { 394 struct device *dev; 395 396 node = dev_to_node(&nt->ndev->dev); 397 398 client_dev = kzalloc_node(sizeof(*client_dev), 399 GFP_KERNEL, node); 400 if (!client_dev) { 401 rc = -ENOMEM; 402 goto err; 403 } 404 405 dev = &client_dev->dev; 406 407 /* setup and register client devices */ 408 dev_set_name(dev, "%s%d", device_name, i); 409 dev->bus = &ntb_transport_bus; 410 dev->release = ntb_transport_client_release; 411 dev->parent = &nt->ndev->dev; 412 413 rc = device_register(dev); 414 if (rc) { 415 kfree(client_dev); 416 goto err; 417 } 418 419 list_add_tail(&client_dev->entry, &nt->client_devs); 420 i++; 421 } 422 423 return 0; 424 425 err: 426 ntb_transport_unregister_client_dev(device_name); 427 428 return rc; 429 } 430 EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev); 431 432 /** 433 * ntb_transport_register_client - Register NTB client driver 434 * @drv: NTB client driver to be registered 435 * 436 * Register an NTB client driver with the NTB transport layer 437 * 438 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 439 */ 440 int ntb_transport_register_client(struct ntb_transport_client *drv) 441 { 442 drv->driver.bus = &ntb_transport_bus; 443 444 if (list_empty(&ntb_transport_list)) 445 return -ENODEV; 446 447 return driver_register(&drv->driver); 448 } 449 EXPORT_SYMBOL_GPL(ntb_transport_register_client); 450 451 /** 452 * ntb_transport_unregister_client - Unregister NTB client driver 453 * @drv: NTB client driver to be unregistered 454 * 455 * Unregister an NTB client driver with the NTB transport layer 456 * 457 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 458 */ 459 void ntb_transport_unregister_client(struct ntb_transport_client *drv) 460 { 461 driver_unregister(&drv->driver); 462 } 463 EXPORT_SYMBOL_GPL(ntb_transport_unregister_client); 464 465 static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count, 466 loff_t *offp) 467 { 468 struct ntb_transport_qp *qp; 469 char *buf; 470 ssize_t ret, out_offset, out_count; 471 472 qp = filp->private_data; 473 474 if (!qp || !qp->link_is_up) 475 return 0; 476 477 out_count = 1000; 478 479 buf = kmalloc(out_count, GFP_KERNEL); 480 if (!buf) 481 return -ENOMEM; 482 483 out_offset = 0; 484 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 485 "\nNTB QP stats:\n\n"); 486 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 487 "rx_bytes - \t%llu\n", qp->rx_bytes); 488 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 489 "rx_pkts - \t%llu\n", qp->rx_pkts); 490 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 491 "rx_memcpy - \t%llu\n", qp->rx_memcpy); 492 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 493 "rx_async - \t%llu\n", qp->rx_async); 494 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 495 "rx_ring_empty - %llu\n", qp->rx_ring_empty); 496 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 497 "rx_err_no_buf - %llu\n", qp->rx_err_no_buf); 498 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 499 "rx_err_oflow - \t%llu\n", qp->rx_err_oflow); 500 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 501 "rx_err_ver - \t%llu\n", qp->rx_err_ver); 502 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 503 "rx_buff - \t0x%p\n", qp->rx_buff); 504 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 505 "rx_index - \t%u\n", qp->rx_index); 506 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 507 "rx_max_entry - \t%u\n", qp->rx_max_entry); 508 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 509 "rx_alloc_entry - \t%u\n\n", qp->rx_alloc_entry); 510 511 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 512 "tx_bytes - \t%llu\n", qp->tx_bytes); 513 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 514 "tx_pkts - \t%llu\n", qp->tx_pkts); 515 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 516 "tx_memcpy - \t%llu\n", qp->tx_memcpy); 517 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 518 "tx_async - \t%llu\n", qp->tx_async); 519 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 520 "tx_ring_full - \t%llu\n", qp->tx_ring_full); 521 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 522 "tx_err_no_buf - %llu\n", qp->tx_err_no_buf); 523 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 524 "tx_mw - \t0x%p\n", qp->tx_mw); 525 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 526 "tx_index (H) - \t%u\n", qp->tx_index); 527 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 528 "RRI (T) - \t%u\n", 529 qp->remote_rx_info->entry); 530 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 531 "tx_max_entry - \t%u\n", qp->tx_max_entry); 532 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 533 "free tx - \t%u\n", 534 ntb_transport_tx_free_entry(qp)); 535 536 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 537 "\n"); 538 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 539 "Using TX DMA - \t%s\n", 540 qp->tx_dma_chan ? "Yes" : "No"); 541 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 542 "Using RX DMA - \t%s\n", 543 qp->rx_dma_chan ? "Yes" : "No"); 544 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 545 "QP Link - \t%s\n", 546 qp->link_is_up ? "Up" : "Down"); 547 out_offset += scnprintf(buf + out_offset, out_count - out_offset, 548 "\n"); 549 550 if (out_offset > out_count) 551 out_offset = out_count; 552 553 ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset); 554 kfree(buf); 555 return ret; 556 } 557 558 static const struct file_operations ntb_qp_debugfs_stats = { 559 .owner = THIS_MODULE, 560 .open = simple_open, 561 .read = debugfs_read, 562 }; 563 564 static void ntb_list_add(spinlock_t *lock, struct list_head *entry, 565 struct list_head *list) 566 { 567 unsigned long flags; 568 569 spin_lock_irqsave(lock, flags); 570 list_add_tail(entry, list); 571 spin_unlock_irqrestore(lock, flags); 572 } 573 574 static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock, 575 struct list_head *list) 576 { 577 struct ntb_queue_entry *entry; 578 unsigned long flags; 579 580 spin_lock_irqsave(lock, flags); 581 if (list_empty(list)) { 582 entry = NULL; 583 goto out; 584 } 585 entry = list_first_entry(list, struct ntb_queue_entry, entry); 586 list_del(&entry->entry); 587 588 out: 589 spin_unlock_irqrestore(lock, flags); 590 591 return entry; 592 } 593 594 static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock, 595 struct list_head *list, 596 struct list_head *to_list) 597 { 598 struct ntb_queue_entry *entry; 599 unsigned long flags; 600 601 spin_lock_irqsave(lock, flags); 602 603 if (list_empty(list)) { 604 entry = NULL; 605 } else { 606 entry = list_first_entry(list, struct ntb_queue_entry, entry); 607 list_move_tail(&entry->entry, to_list); 608 } 609 610 spin_unlock_irqrestore(lock, flags); 611 612 return entry; 613 } 614 615 static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, 616 unsigned int qp_num) 617 { 618 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; 619 struct ntb_transport_mw *mw; 620 struct ntb_dev *ndev = nt->ndev; 621 struct ntb_queue_entry *entry; 622 unsigned int rx_size, num_qps_mw; 623 unsigned int mw_num, mw_count, qp_count; 624 unsigned int i; 625 int node; 626 627 mw_count = nt->mw_count; 628 qp_count = nt->qp_count; 629 630 mw_num = QP_TO_MW(nt, qp_num); 631 mw = &nt->mw_vec[mw_num]; 632 633 if (!mw->virt_addr) 634 return -ENOMEM; 635 636 if (mw_num < qp_count % mw_count) 637 num_qps_mw = qp_count / mw_count + 1; 638 else 639 num_qps_mw = qp_count / mw_count; 640 641 rx_size = (unsigned int)mw->xlat_size / num_qps_mw; 642 qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count); 643 rx_size -= sizeof(struct ntb_rx_info); 644 645 qp->remote_rx_info = qp->rx_buff + rx_size; 646 647 /* Due to housekeeping, there must be atleast 2 buffs */ 648 qp->rx_max_frame = min(transport_mtu, rx_size / 2); 649 qp->rx_max_entry = rx_size / qp->rx_max_frame; 650 qp->rx_index = 0; 651 652 /* 653 * Checking to see if we have more entries than the default. 654 * We should add additional entries if that is the case so we 655 * can be in sync with the transport frames. 656 */ 657 node = dev_to_node(&ndev->dev); 658 for (i = qp->rx_alloc_entry; i < qp->rx_max_entry; i++) { 659 entry = kzalloc_node(sizeof(*entry), GFP_KERNEL, node); 660 if (!entry) 661 return -ENOMEM; 662 663 entry->qp = qp; 664 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, 665 &qp->rx_free_q); 666 qp->rx_alloc_entry++; 667 } 668 669 qp->remote_rx_info->entry = qp->rx_max_entry - 1; 670 671 /* setup the hdr offsets with 0's */ 672 for (i = 0; i < qp->rx_max_entry; i++) { 673 void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) - 674 sizeof(struct ntb_payload_header)); 675 memset(offset, 0, sizeof(struct ntb_payload_header)); 676 } 677 678 qp->rx_pkts = 0; 679 qp->tx_pkts = 0; 680 qp->tx_index = 0; 681 682 return 0; 683 } 684 685 static irqreturn_t ntb_transport_isr(int irq, void *dev) 686 { 687 struct ntb_transport_qp *qp = dev; 688 689 tasklet_schedule(&qp->rxc_db_work); 690 691 return IRQ_HANDLED; 692 } 693 694 static void ntb_transport_setup_qp_peer_msi(struct ntb_transport_ctx *nt, 695 unsigned int qp_num) 696 { 697 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; 698 int spad = qp_num * 2 + nt->msi_spad_offset; 699 700 if (!nt->use_msi) 701 return; 702 703 if (spad >= ntb_spad_count(nt->ndev)) 704 return; 705 706 qp->peer_msi_desc.addr_offset = 707 ntb_peer_spad_read(qp->ndev, PIDX, spad); 708 qp->peer_msi_desc.data = 709 ntb_peer_spad_read(qp->ndev, PIDX, spad + 1); 710 711 dev_dbg(&qp->ndev->pdev->dev, "QP%d Peer MSI addr=%x data=%x\n", 712 qp_num, qp->peer_msi_desc.addr_offset, qp->peer_msi_desc.data); 713 714 if (qp->peer_msi_desc.addr_offset) { 715 qp->use_msi = true; 716 dev_info(&qp->ndev->pdev->dev, 717 "Using MSI interrupts for QP%d\n", qp_num); 718 } 719 } 720 721 static void ntb_transport_setup_qp_msi(struct ntb_transport_ctx *nt, 722 unsigned int qp_num) 723 { 724 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; 725 int spad = qp_num * 2 + nt->msi_spad_offset; 726 int rc; 727 728 if (!nt->use_msi) 729 return; 730 731 if (spad >= ntb_spad_count(nt->ndev)) { 732 dev_warn_once(&qp->ndev->pdev->dev, 733 "Not enough SPADS to use MSI interrupts\n"); 734 return; 735 } 736 737 ntb_spad_write(qp->ndev, spad, 0); 738 ntb_spad_write(qp->ndev, spad + 1, 0); 739 740 if (!qp->msi_irq) { 741 qp->msi_irq = ntbm_msi_request_irq(qp->ndev, ntb_transport_isr, 742 KBUILD_MODNAME, qp, 743 &qp->msi_desc); 744 if (qp->msi_irq < 0) { 745 dev_warn(&qp->ndev->pdev->dev, 746 "Unable to allocate MSI interrupt for qp%d\n", 747 qp_num); 748 return; 749 } 750 } 751 752 rc = ntb_spad_write(qp->ndev, spad, qp->msi_desc.addr_offset); 753 if (rc) 754 goto err_free_interrupt; 755 756 rc = ntb_spad_write(qp->ndev, spad + 1, qp->msi_desc.data); 757 if (rc) 758 goto err_free_interrupt; 759 760 dev_dbg(&qp->ndev->pdev->dev, "QP%d MSI %d addr=%x data=%x\n", 761 qp_num, qp->msi_irq, qp->msi_desc.addr_offset, 762 qp->msi_desc.data); 763 764 return; 765 766 err_free_interrupt: 767 devm_free_irq(&nt->ndev->dev, qp->msi_irq, qp); 768 } 769 770 static void ntb_transport_msi_peer_desc_changed(struct ntb_transport_ctx *nt) 771 { 772 int i; 773 774 dev_dbg(&nt->ndev->pdev->dev, "Peer MSI descriptors changed"); 775 776 for (i = 0; i < nt->qp_count; i++) 777 ntb_transport_setup_qp_peer_msi(nt, i); 778 } 779 780 static void ntb_transport_msi_desc_changed(void *data) 781 { 782 struct ntb_transport_ctx *nt = data; 783 int i; 784 785 dev_dbg(&nt->ndev->pdev->dev, "MSI descriptors changed"); 786 787 for (i = 0; i < nt->qp_count; i++) 788 ntb_transport_setup_qp_msi(nt, i); 789 790 ntb_peer_db_set(nt->ndev, nt->msi_db_mask); 791 } 792 793 static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw) 794 { 795 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; 796 struct pci_dev *pdev = nt->ndev->pdev; 797 798 if (!mw->virt_addr) 799 return; 800 801 ntb_mw_clear_trans(nt->ndev, PIDX, num_mw); 802 dma_free_coherent(&pdev->dev, mw->alloc_size, 803 mw->alloc_addr, mw->dma_addr); 804 mw->xlat_size = 0; 805 mw->buff_size = 0; 806 mw->alloc_size = 0; 807 mw->alloc_addr = NULL; 808 mw->virt_addr = NULL; 809 } 810 811 static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw, 812 struct device *dma_dev, size_t align) 813 { 814 dma_addr_t dma_addr; 815 void *alloc_addr, *virt_addr; 816 int rc; 817 818 alloc_addr = dma_alloc_coherent(dma_dev, mw->alloc_size, 819 &dma_addr, GFP_KERNEL); 820 if (!alloc_addr) { 821 dev_err(dma_dev, "Unable to alloc MW buff of size %zu\n", 822 mw->alloc_size); 823 return -ENOMEM; 824 } 825 virt_addr = alloc_addr; 826 827 /* 828 * we must ensure that the memory address allocated is BAR size 829 * aligned in order for the XLAT register to take the value. This 830 * is a requirement of the hardware. It is recommended to setup CMA 831 * for BAR sizes equal or greater than 4MB. 832 */ 833 if (!IS_ALIGNED(dma_addr, align)) { 834 if (mw->alloc_size > mw->buff_size) { 835 virt_addr = PTR_ALIGN(alloc_addr, align); 836 dma_addr = ALIGN(dma_addr, align); 837 } else { 838 rc = -ENOMEM; 839 goto err; 840 } 841 } 842 843 mw->alloc_addr = alloc_addr; 844 mw->virt_addr = virt_addr; 845 mw->dma_addr = dma_addr; 846 847 return 0; 848 849 err: 850 dma_free_coherent(dma_dev, mw->alloc_size, alloc_addr, dma_addr); 851 852 return rc; 853 } 854 855 static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, 856 resource_size_t size) 857 { 858 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; 859 struct pci_dev *pdev = nt->ndev->pdev; 860 size_t xlat_size, buff_size; 861 resource_size_t xlat_align; 862 resource_size_t xlat_align_size; 863 int rc; 864 865 if (!size) 866 return -EINVAL; 867 868 rc = ntb_mw_get_align(nt->ndev, PIDX, num_mw, &xlat_align, 869 &xlat_align_size, NULL); 870 if (rc) 871 return rc; 872 873 xlat_size = round_up(size, xlat_align_size); 874 buff_size = round_up(size, xlat_align); 875 876 /* No need to re-setup */ 877 if (mw->xlat_size == xlat_size) 878 return 0; 879 880 if (mw->buff_size) 881 ntb_free_mw(nt, num_mw); 882 883 /* Alloc memory for receiving data. Must be aligned */ 884 mw->xlat_size = xlat_size; 885 mw->buff_size = buff_size; 886 mw->alloc_size = buff_size; 887 888 rc = ntb_alloc_mw_buffer(mw, &pdev->dev, xlat_align); 889 if (rc) { 890 mw->alloc_size *= 2; 891 rc = ntb_alloc_mw_buffer(mw, &pdev->dev, xlat_align); 892 if (rc) { 893 dev_err(&pdev->dev, 894 "Unable to alloc aligned MW buff\n"); 895 mw->xlat_size = 0; 896 mw->buff_size = 0; 897 mw->alloc_size = 0; 898 return rc; 899 } 900 } 901 902 /* Notify HW the memory location of the receive buffer */ 903 rc = ntb_mw_set_trans(nt->ndev, PIDX, num_mw, mw->dma_addr, 904 mw->xlat_size); 905 if (rc) { 906 dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw); 907 ntb_free_mw(nt, num_mw); 908 return -EIO; 909 } 910 911 return 0; 912 } 913 914 static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp) 915 { 916 qp->link_is_up = false; 917 qp->active = false; 918 919 qp->tx_index = 0; 920 qp->rx_index = 0; 921 qp->rx_bytes = 0; 922 qp->rx_pkts = 0; 923 qp->rx_ring_empty = 0; 924 qp->rx_err_no_buf = 0; 925 qp->rx_err_oflow = 0; 926 qp->rx_err_ver = 0; 927 qp->rx_memcpy = 0; 928 qp->rx_async = 0; 929 qp->tx_bytes = 0; 930 qp->tx_pkts = 0; 931 qp->tx_ring_full = 0; 932 qp->tx_err_no_buf = 0; 933 qp->tx_memcpy = 0; 934 qp->tx_async = 0; 935 } 936 937 static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp) 938 { 939 struct ntb_transport_ctx *nt = qp->transport; 940 struct pci_dev *pdev = nt->ndev->pdev; 941 942 dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num); 943 944 cancel_delayed_work_sync(&qp->link_work); 945 ntb_qp_link_down_reset(qp); 946 947 if (qp->event_handler) 948 qp->event_handler(qp->cb_data, qp->link_is_up); 949 } 950 951 static void ntb_qp_link_cleanup_work(struct work_struct *work) 952 { 953 struct ntb_transport_qp *qp = container_of(work, 954 struct ntb_transport_qp, 955 link_cleanup); 956 struct ntb_transport_ctx *nt = qp->transport; 957 958 ntb_qp_link_cleanup(qp); 959 960 if (nt->link_is_up) 961 schedule_delayed_work(&qp->link_work, 962 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT)); 963 } 964 965 static void ntb_qp_link_down(struct ntb_transport_qp *qp) 966 { 967 schedule_work(&qp->link_cleanup); 968 } 969 970 static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt) 971 { 972 struct ntb_transport_qp *qp; 973 u64 qp_bitmap_alloc; 974 unsigned int i, count; 975 976 qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free; 977 978 /* Pass along the info to any clients */ 979 for (i = 0; i < nt->qp_count; i++) 980 if (qp_bitmap_alloc & BIT_ULL(i)) { 981 qp = &nt->qp_vec[i]; 982 ntb_qp_link_cleanup(qp); 983 cancel_work_sync(&qp->link_cleanup); 984 cancel_delayed_work_sync(&qp->link_work); 985 } 986 987 if (!nt->link_is_up) 988 cancel_delayed_work_sync(&nt->link_work); 989 990 for (i = 0; i < nt->mw_count; i++) 991 ntb_free_mw(nt, i); 992 993 /* The scratchpad registers keep the values if the remote side 994 * goes down, blast them now to give them a sane value the next 995 * time they are accessed 996 */ 997 count = ntb_spad_count(nt->ndev); 998 for (i = 0; i < count; i++) 999 ntb_spad_write(nt->ndev, i, 0); 1000 } 1001 1002 static void ntb_transport_link_cleanup_work(struct work_struct *work) 1003 { 1004 struct ntb_transport_ctx *nt = 1005 container_of(work, struct ntb_transport_ctx, link_cleanup); 1006 1007 ntb_transport_link_cleanup(nt); 1008 } 1009 1010 static void ntb_transport_event_callback(void *data) 1011 { 1012 struct ntb_transport_ctx *nt = data; 1013 1014 if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1) 1015 schedule_delayed_work(&nt->link_work, 0); 1016 else 1017 schedule_work(&nt->link_cleanup); 1018 } 1019 1020 static void ntb_transport_link_work(struct work_struct *work) 1021 { 1022 struct ntb_transport_ctx *nt = 1023 container_of(work, struct ntb_transport_ctx, link_work.work); 1024 struct ntb_dev *ndev = nt->ndev; 1025 struct pci_dev *pdev = ndev->pdev; 1026 resource_size_t size; 1027 u32 val; 1028 int rc = 0, i, spad; 1029 1030 /* send the local info, in the opposite order of the way we read it */ 1031 1032 if (nt->use_msi) { 1033 rc = ntb_msi_setup_mws(ndev); 1034 if (rc) { 1035 dev_warn(&pdev->dev, 1036 "Failed to register MSI memory window: %d\n", 1037 rc); 1038 nt->use_msi = false; 1039 } 1040 } 1041 1042 for (i = 0; i < nt->qp_count; i++) 1043 ntb_transport_setup_qp_msi(nt, i); 1044 1045 for (i = 0; i < nt->mw_count; i++) { 1046 size = nt->mw_vec[i].phys_size; 1047 1048 if (max_mw_size && size > max_mw_size) 1049 size = max_mw_size; 1050 1051 spad = MW0_SZ_HIGH + (i * 2); 1052 ntb_peer_spad_write(ndev, PIDX, spad, upper_32_bits(size)); 1053 1054 spad = MW0_SZ_LOW + (i * 2); 1055 ntb_peer_spad_write(ndev, PIDX, spad, lower_32_bits(size)); 1056 } 1057 1058 ntb_peer_spad_write(ndev, PIDX, NUM_MWS, nt->mw_count); 1059 1060 ntb_peer_spad_write(ndev, PIDX, NUM_QPS, nt->qp_count); 1061 1062 ntb_peer_spad_write(ndev, PIDX, VERSION, NTB_TRANSPORT_VERSION); 1063 1064 /* Query the remote side for its info */ 1065 val = ntb_spad_read(ndev, VERSION); 1066 dev_dbg(&pdev->dev, "Remote version = %d\n", val); 1067 if (val != NTB_TRANSPORT_VERSION) 1068 goto out; 1069 1070 val = ntb_spad_read(ndev, NUM_QPS); 1071 dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val); 1072 if (val != nt->qp_count) 1073 goto out; 1074 1075 val = ntb_spad_read(ndev, NUM_MWS); 1076 dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val); 1077 if (val != nt->mw_count) 1078 goto out; 1079 1080 for (i = 0; i < nt->mw_count; i++) { 1081 u64 val64; 1082 1083 val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2)); 1084 val64 = (u64)val << 32; 1085 1086 val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2)); 1087 val64 |= val; 1088 1089 dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64); 1090 1091 rc = ntb_set_mw(nt, i, val64); 1092 if (rc) 1093 goto out1; 1094 } 1095 1096 nt->link_is_up = true; 1097 1098 for (i = 0; i < nt->qp_count; i++) { 1099 struct ntb_transport_qp *qp = &nt->qp_vec[i]; 1100 1101 ntb_transport_setup_qp_mw(nt, i); 1102 ntb_transport_setup_qp_peer_msi(nt, i); 1103 1104 if (qp->client_ready) 1105 schedule_delayed_work(&qp->link_work, 0); 1106 } 1107 1108 return; 1109 1110 out1: 1111 for (i = 0; i < nt->mw_count; i++) 1112 ntb_free_mw(nt, i); 1113 1114 /* if there's an actual failure, we should just bail */ 1115 if (rc < 0) 1116 return; 1117 1118 out: 1119 if (ntb_link_is_up(ndev, NULL, NULL) == 1) 1120 schedule_delayed_work(&nt->link_work, 1121 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT)); 1122 } 1123 1124 static void ntb_qp_link_work(struct work_struct *work) 1125 { 1126 struct ntb_transport_qp *qp = container_of(work, 1127 struct ntb_transport_qp, 1128 link_work.work); 1129 struct pci_dev *pdev = qp->ndev->pdev; 1130 struct ntb_transport_ctx *nt = qp->transport; 1131 int val; 1132 1133 WARN_ON(!nt->link_is_up); 1134 1135 val = ntb_spad_read(nt->ndev, QP_LINKS); 1136 1137 ntb_peer_spad_write(nt->ndev, PIDX, QP_LINKS, val | BIT(qp->qp_num)); 1138 1139 /* query remote spad for qp ready bits */ 1140 dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val); 1141 1142 /* See if the remote side is up */ 1143 if (val & BIT(qp->qp_num)) { 1144 dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num); 1145 qp->link_is_up = true; 1146 qp->active = true; 1147 1148 if (qp->event_handler) 1149 qp->event_handler(qp->cb_data, qp->link_is_up); 1150 1151 if (qp->active) 1152 tasklet_schedule(&qp->rxc_db_work); 1153 } else if (nt->link_is_up) 1154 schedule_delayed_work(&qp->link_work, 1155 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT)); 1156 } 1157 1158 static int ntb_transport_init_queue(struct ntb_transport_ctx *nt, 1159 unsigned int qp_num) 1160 { 1161 struct ntb_transport_qp *qp; 1162 phys_addr_t mw_base; 1163 resource_size_t mw_size; 1164 unsigned int num_qps_mw, tx_size; 1165 unsigned int mw_num, mw_count, qp_count; 1166 u64 qp_offset; 1167 1168 mw_count = nt->mw_count; 1169 qp_count = nt->qp_count; 1170 1171 mw_num = QP_TO_MW(nt, qp_num); 1172 1173 qp = &nt->qp_vec[qp_num]; 1174 qp->qp_num = qp_num; 1175 qp->transport = nt; 1176 qp->ndev = nt->ndev; 1177 qp->client_ready = false; 1178 qp->event_handler = NULL; 1179 ntb_qp_link_down_reset(qp); 1180 1181 if (mw_num < qp_count % mw_count) 1182 num_qps_mw = qp_count / mw_count + 1; 1183 else 1184 num_qps_mw = qp_count / mw_count; 1185 1186 mw_base = nt->mw_vec[mw_num].phys_addr; 1187 mw_size = nt->mw_vec[mw_num].phys_size; 1188 1189 if (max_mw_size && mw_size > max_mw_size) 1190 mw_size = max_mw_size; 1191 1192 tx_size = (unsigned int)mw_size / num_qps_mw; 1193 qp_offset = tx_size * (qp_num / mw_count); 1194 1195 qp->tx_mw_size = tx_size; 1196 qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset; 1197 if (!qp->tx_mw) 1198 return -EINVAL; 1199 1200 qp->tx_mw_phys = mw_base + qp_offset; 1201 if (!qp->tx_mw_phys) 1202 return -EINVAL; 1203 1204 tx_size -= sizeof(struct ntb_rx_info); 1205 qp->rx_info = qp->tx_mw + tx_size; 1206 1207 /* Due to housekeeping, there must be atleast 2 buffs */ 1208 qp->tx_max_frame = min(transport_mtu, tx_size / 2); 1209 qp->tx_max_entry = tx_size / qp->tx_max_frame; 1210 1211 if (nt->debugfs_node_dir) { 1212 char debugfs_name[4]; 1213 1214 snprintf(debugfs_name, 4, "qp%d", qp_num); 1215 qp->debugfs_dir = debugfs_create_dir(debugfs_name, 1216 nt->debugfs_node_dir); 1217 1218 qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR, 1219 qp->debugfs_dir, qp, 1220 &ntb_qp_debugfs_stats); 1221 } else { 1222 qp->debugfs_dir = NULL; 1223 qp->debugfs_stats = NULL; 1224 } 1225 1226 INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work); 1227 INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work); 1228 1229 spin_lock_init(&qp->ntb_rx_q_lock); 1230 spin_lock_init(&qp->ntb_tx_free_q_lock); 1231 1232 INIT_LIST_HEAD(&qp->rx_post_q); 1233 INIT_LIST_HEAD(&qp->rx_pend_q); 1234 INIT_LIST_HEAD(&qp->rx_free_q); 1235 INIT_LIST_HEAD(&qp->tx_free_q); 1236 1237 tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db, 1238 (unsigned long)qp); 1239 1240 return 0; 1241 } 1242 1243 static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) 1244 { 1245 struct ntb_transport_ctx *nt; 1246 struct ntb_transport_mw *mw; 1247 unsigned int mw_count, qp_count, spad_count, max_mw_count_for_spads; 1248 u64 qp_bitmap; 1249 int node; 1250 int rc, i; 1251 1252 mw_count = ntb_peer_mw_count(ndev); 1253 1254 if (!ndev->ops->mw_set_trans) { 1255 dev_err(&ndev->dev, "Inbound MW based NTB API is required\n"); 1256 return -EINVAL; 1257 } 1258 1259 if (ntb_db_is_unsafe(ndev)) 1260 dev_dbg(&ndev->dev, 1261 "doorbell is unsafe, proceed anyway...\n"); 1262 if (ntb_spad_is_unsafe(ndev)) 1263 dev_dbg(&ndev->dev, 1264 "scratchpad is unsafe, proceed anyway...\n"); 1265 1266 if (ntb_peer_port_count(ndev) != NTB_DEF_PEER_CNT) 1267 dev_warn(&ndev->dev, "Multi-port NTB devices unsupported\n"); 1268 1269 node = dev_to_node(&ndev->dev); 1270 1271 nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node); 1272 if (!nt) 1273 return -ENOMEM; 1274 1275 nt->ndev = ndev; 1276 1277 /* 1278 * If we are using MSI, and have at least one extra memory window, 1279 * we will reserve the last MW for the MSI window. 1280 */ 1281 if (use_msi && mw_count > 1) { 1282 rc = ntb_msi_init(ndev, ntb_transport_msi_desc_changed); 1283 if (!rc) { 1284 mw_count -= 1; 1285 nt->use_msi = true; 1286 } 1287 } 1288 1289 spad_count = ntb_spad_count(ndev); 1290 1291 /* Limit the MW's based on the availability of scratchpads */ 1292 1293 if (spad_count < NTB_TRANSPORT_MIN_SPADS) { 1294 nt->mw_count = 0; 1295 rc = -EINVAL; 1296 goto err; 1297 } 1298 1299 max_mw_count_for_spads = (spad_count - MW0_SZ_HIGH) / 2; 1300 nt->mw_count = min(mw_count, max_mw_count_for_spads); 1301 1302 nt->msi_spad_offset = nt->mw_count * 2 + MW0_SZ_HIGH; 1303 1304 nt->mw_vec = kcalloc_node(mw_count, sizeof(*nt->mw_vec), 1305 GFP_KERNEL, node); 1306 if (!nt->mw_vec) { 1307 rc = -ENOMEM; 1308 goto err; 1309 } 1310 1311 for (i = 0; i < mw_count; i++) { 1312 mw = &nt->mw_vec[i]; 1313 1314 rc = ntb_peer_mw_get_addr(ndev, i, &mw->phys_addr, 1315 &mw->phys_size); 1316 if (rc) 1317 goto err1; 1318 1319 mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size); 1320 if (!mw->vbase) { 1321 rc = -ENOMEM; 1322 goto err1; 1323 } 1324 1325 mw->buff_size = 0; 1326 mw->xlat_size = 0; 1327 mw->virt_addr = NULL; 1328 mw->dma_addr = 0; 1329 } 1330 1331 qp_bitmap = ntb_db_valid_mask(ndev); 1332 1333 qp_count = ilog2(qp_bitmap); 1334 if (nt->use_msi) { 1335 qp_count -= 1; 1336 nt->msi_db_mask = 1 << qp_count; 1337 ntb_db_clear_mask(ndev, nt->msi_db_mask); 1338 } 1339 1340 if (max_num_clients && max_num_clients < qp_count) 1341 qp_count = max_num_clients; 1342 else if (nt->mw_count < qp_count) 1343 qp_count = nt->mw_count; 1344 1345 qp_bitmap &= BIT_ULL(qp_count) - 1; 1346 1347 nt->qp_count = qp_count; 1348 nt->qp_bitmap = qp_bitmap; 1349 nt->qp_bitmap_free = qp_bitmap; 1350 1351 nt->qp_vec = kcalloc_node(qp_count, sizeof(*nt->qp_vec), 1352 GFP_KERNEL, node); 1353 if (!nt->qp_vec) { 1354 rc = -ENOMEM; 1355 goto err1; 1356 } 1357 1358 if (nt_debugfs_dir) { 1359 nt->debugfs_node_dir = 1360 debugfs_create_dir(pci_name(ndev->pdev), 1361 nt_debugfs_dir); 1362 } 1363 1364 for (i = 0; i < qp_count; i++) { 1365 rc = ntb_transport_init_queue(nt, i); 1366 if (rc) 1367 goto err2; 1368 } 1369 1370 INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work); 1371 INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work); 1372 1373 rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops); 1374 if (rc) 1375 goto err2; 1376 1377 INIT_LIST_HEAD(&nt->client_devs); 1378 rc = ntb_bus_init(nt); 1379 if (rc) 1380 goto err3; 1381 1382 nt->link_is_up = false; 1383 ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO); 1384 ntb_link_event(ndev); 1385 1386 return 0; 1387 1388 err3: 1389 ntb_clear_ctx(ndev); 1390 err2: 1391 kfree(nt->qp_vec); 1392 err1: 1393 while (i--) { 1394 mw = &nt->mw_vec[i]; 1395 iounmap(mw->vbase); 1396 } 1397 kfree(nt->mw_vec); 1398 err: 1399 kfree(nt); 1400 return rc; 1401 } 1402 1403 static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev) 1404 { 1405 struct ntb_transport_ctx *nt = ndev->ctx; 1406 struct ntb_transport_qp *qp; 1407 u64 qp_bitmap_alloc; 1408 int i; 1409 1410 ntb_transport_link_cleanup(nt); 1411 cancel_work_sync(&nt->link_cleanup); 1412 cancel_delayed_work_sync(&nt->link_work); 1413 1414 qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free; 1415 1416 /* verify that all the qp's are freed */ 1417 for (i = 0; i < nt->qp_count; i++) { 1418 qp = &nt->qp_vec[i]; 1419 if (qp_bitmap_alloc & BIT_ULL(i)) 1420 ntb_transport_free_queue(qp); 1421 debugfs_remove_recursive(qp->debugfs_dir); 1422 } 1423 1424 ntb_link_disable(ndev); 1425 ntb_clear_ctx(ndev); 1426 1427 ntb_bus_remove(nt); 1428 1429 for (i = nt->mw_count; i--; ) { 1430 ntb_free_mw(nt, i); 1431 iounmap(nt->mw_vec[i].vbase); 1432 } 1433 1434 kfree(nt->qp_vec); 1435 kfree(nt->mw_vec); 1436 kfree(nt); 1437 } 1438 1439 static void ntb_complete_rxc(struct ntb_transport_qp *qp) 1440 { 1441 struct ntb_queue_entry *entry; 1442 void *cb_data; 1443 unsigned int len; 1444 unsigned long irqflags; 1445 1446 spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags); 1447 1448 while (!list_empty(&qp->rx_post_q)) { 1449 entry = list_first_entry(&qp->rx_post_q, 1450 struct ntb_queue_entry, entry); 1451 if (!(entry->flags & DESC_DONE_FLAG)) 1452 break; 1453 1454 entry->rx_hdr->flags = 0; 1455 iowrite32(entry->rx_index, &qp->rx_info->entry); 1456 1457 cb_data = entry->cb_data; 1458 len = entry->len; 1459 1460 list_move_tail(&entry->entry, &qp->rx_free_q); 1461 1462 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags); 1463 1464 if (qp->rx_handler && qp->client_ready) 1465 qp->rx_handler(qp, qp->cb_data, cb_data, len); 1466 1467 spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags); 1468 } 1469 1470 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags); 1471 } 1472 1473 static void ntb_rx_copy_callback(void *data, 1474 const struct dmaengine_result *res) 1475 { 1476 struct ntb_queue_entry *entry = data; 1477 1478 /* we need to check DMA results if we are using DMA */ 1479 if (res) { 1480 enum dmaengine_tx_result dma_err = res->result; 1481 1482 switch (dma_err) { 1483 case DMA_TRANS_READ_FAILED: 1484 case DMA_TRANS_WRITE_FAILED: 1485 entry->errors++; 1486 /* fall through */ 1487 case DMA_TRANS_ABORTED: 1488 { 1489 struct ntb_transport_qp *qp = entry->qp; 1490 void *offset = qp->rx_buff + qp->rx_max_frame * 1491 qp->rx_index; 1492 1493 ntb_memcpy_rx(entry, offset); 1494 qp->rx_memcpy++; 1495 return; 1496 } 1497 1498 case DMA_TRANS_NOERROR: 1499 default: 1500 break; 1501 } 1502 } 1503 1504 entry->flags |= DESC_DONE_FLAG; 1505 1506 ntb_complete_rxc(entry->qp); 1507 } 1508 1509 static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset) 1510 { 1511 void *buf = entry->buf; 1512 size_t len = entry->len; 1513 1514 memcpy(buf, offset, len); 1515 1516 /* Ensure that the data is fully copied out before clearing the flag */ 1517 wmb(); 1518 1519 ntb_rx_copy_callback(entry, NULL); 1520 } 1521 1522 static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset) 1523 { 1524 struct dma_async_tx_descriptor *txd; 1525 struct ntb_transport_qp *qp = entry->qp; 1526 struct dma_chan *chan = qp->rx_dma_chan; 1527 struct dma_device *device; 1528 size_t pay_off, buff_off, len; 1529 struct dmaengine_unmap_data *unmap; 1530 dma_cookie_t cookie; 1531 void *buf = entry->buf; 1532 1533 len = entry->len; 1534 device = chan->device; 1535 pay_off = (size_t)offset & ~PAGE_MASK; 1536 buff_off = (size_t)buf & ~PAGE_MASK; 1537 1538 if (!is_dma_copy_aligned(device, pay_off, buff_off, len)) 1539 goto err; 1540 1541 unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT); 1542 if (!unmap) 1543 goto err; 1544 1545 unmap->len = len; 1546 unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset), 1547 pay_off, len, DMA_TO_DEVICE); 1548 if (dma_mapping_error(device->dev, unmap->addr[0])) 1549 goto err_get_unmap; 1550 1551 unmap->to_cnt = 1; 1552 1553 unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf), 1554 buff_off, len, DMA_FROM_DEVICE); 1555 if (dma_mapping_error(device->dev, unmap->addr[1])) 1556 goto err_get_unmap; 1557 1558 unmap->from_cnt = 1; 1559 1560 txd = device->device_prep_dma_memcpy(chan, unmap->addr[1], 1561 unmap->addr[0], len, 1562 DMA_PREP_INTERRUPT); 1563 if (!txd) 1564 goto err_get_unmap; 1565 1566 txd->callback_result = ntb_rx_copy_callback; 1567 txd->callback_param = entry; 1568 dma_set_unmap(txd, unmap); 1569 1570 cookie = dmaengine_submit(txd); 1571 if (dma_submit_error(cookie)) 1572 goto err_set_unmap; 1573 1574 dmaengine_unmap_put(unmap); 1575 1576 qp->last_cookie = cookie; 1577 1578 qp->rx_async++; 1579 1580 return 0; 1581 1582 err_set_unmap: 1583 dmaengine_unmap_put(unmap); 1584 err_get_unmap: 1585 dmaengine_unmap_put(unmap); 1586 err: 1587 return -ENXIO; 1588 } 1589 1590 static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset) 1591 { 1592 struct ntb_transport_qp *qp = entry->qp; 1593 struct dma_chan *chan = qp->rx_dma_chan; 1594 int res; 1595 1596 if (!chan) 1597 goto err; 1598 1599 if (entry->len < copy_bytes) 1600 goto err; 1601 1602 res = ntb_async_rx_submit(entry, offset); 1603 if (res < 0) 1604 goto err; 1605 1606 if (!entry->retries) 1607 qp->rx_async++; 1608 1609 return; 1610 1611 err: 1612 ntb_memcpy_rx(entry, offset); 1613 qp->rx_memcpy++; 1614 } 1615 1616 static int ntb_process_rxc(struct ntb_transport_qp *qp) 1617 { 1618 struct ntb_payload_header *hdr; 1619 struct ntb_queue_entry *entry; 1620 void *offset; 1621 1622 offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index; 1623 hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header); 1624 1625 dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n", 1626 qp->qp_num, hdr->ver, hdr->len, hdr->flags); 1627 1628 if (!(hdr->flags & DESC_DONE_FLAG)) { 1629 dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n"); 1630 qp->rx_ring_empty++; 1631 return -EAGAIN; 1632 } 1633 1634 if (hdr->flags & LINK_DOWN_FLAG) { 1635 dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n"); 1636 ntb_qp_link_down(qp); 1637 hdr->flags = 0; 1638 return -EAGAIN; 1639 } 1640 1641 if (hdr->ver != (u32)qp->rx_pkts) { 1642 dev_dbg(&qp->ndev->pdev->dev, 1643 "version mismatch, expected %llu - got %u\n", 1644 qp->rx_pkts, hdr->ver); 1645 qp->rx_err_ver++; 1646 return -EIO; 1647 } 1648 1649 entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q); 1650 if (!entry) { 1651 dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n"); 1652 qp->rx_err_no_buf++; 1653 return -EAGAIN; 1654 } 1655 1656 entry->rx_hdr = hdr; 1657 entry->rx_index = qp->rx_index; 1658 1659 if (hdr->len > entry->len) { 1660 dev_dbg(&qp->ndev->pdev->dev, 1661 "receive buffer overflow! Wanted %d got %d\n", 1662 hdr->len, entry->len); 1663 qp->rx_err_oflow++; 1664 1665 entry->len = -EIO; 1666 entry->flags |= DESC_DONE_FLAG; 1667 1668 ntb_complete_rxc(qp); 1669 } else { 1670 dev_dbg(&qp->ndev->pdev->dev, 1671 "RX OK index %u ver %u size %d into buf size %d\n", 1672 qp->rx_index, hdr->ver, hdr->len, entry->len); 1673 1674 qp->rx_bytes += hdr->len; 1675 qp->rx_pkts++; 1676 1677 entry->len = hdr->len; 1678 1679 ntb_async_rx(entry, offset); 1680 } 1681 1682 qp->rx_index++; 1683 qp->rx_index %= qp->rx_max_entry; 1684 1685 return 0; 1686 } 1687 1688 static void ntb_transport_rxc_db(unsigned long data) 1689 { 1690 struct ntb_transport_qp *qp = (void *)data; 1691 int rc, i; 1692 1693 dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n", 1694 __func__, qp->qp_num); 1695 1696 /* Limit the number of packets processed in a single interrupt to 1697 * provide fairness to others 1698 */ 1699 for (i = 0; i < qp->rx_max_entry; i++) { 1700 rc = ntb_process_rxc(qp); 1701 if (rc) 1702 break; 1703 } 1704 1705 if (i && qp->rx_dma_chan) 1706 dma_async_issue_pending(qp->rx_dma_chan); 1707 1708 if (i == qp->rx_max_entry) { 1709 /* there is more work to do */ 1710 if (qp->active) 1711 tasklet_schedule(&qp->rxc_db_work); 1712 } else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) { 1713 /* the doorbell bit is set: clear it */ 1714 ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num)); 1715 /* ntb_db_read ensures ntb_db_clear write is committed */ 1716 ntb_db_read(qp->ndev); 1717 1718 /* an interrupt may have arrived between finishing 1719 * ntb_process_rxc and clearing the doorbell bit: 1720 * there might be some more work to do. 1721 */ 1722 if (qp->active) 1723 tasklet_schedule(&qp->rxc_db_work); 1724 } 1725 } 1726 1727 static void ntb_tx_copy_callback(void *data, 1728 const struct dmaengine_result *res) 1729 { 1730 struct ntb_queue_entry *entry = data; 1731 struct ntb_transport_qp *qp = entry->qp; 1732 struct ntb_payload_header __iomem *hdr = entry->tx_hdr; 1733 1734 /* we need to check DMA results if we are using DMA */ 1735 if (res) { 1736 enum dmaengine_tx_result dma_err = res->result; 1737 1738 switch (dma_err) { 1739 case DMA_TRANS_READ_FAILED: 1740 case DMA_TRANS_WRITE_FAILED: 1741 entry->errors++; 1742 /* fall through */ 1743 case DMA_TRANS_ABORTED: 1744 { 1745 void __iomem *offset = 1746 qp->tx_mw + qp->tx_max_frame * 1747 entry->tx_index; 1748 1749 /* resubmit via CPU */ 1750 ntb_memcpy_tx(entry, offset); 1751 qp->tx_memcpy++; 1752 return; 1753 } 1754 1755 case DMA_TRANS_NOERROR: 1756 default: 1757 break; 1758 } 1759 } 1760 1761 iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags); 1762 1763 if (qp->use_msi) 1764 ntb_msi_peer_trigger(qp->ndev, PIDX, &qp->peer_msi_desc); 1765 else 1766 ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num)); 1767 1768 /* The entry length can only be zero if the packet is intended to be a 1769 * "link down" or similar. Since no payload is being sent in these 1770 * cases, there is nothing to add to the completion queue. 1771 */ 1772 if (entry->len > 0) { 1773 qp->tx_bytes += entry->len; 1774 1775 if (qp->tx_handler) 1776 qp->tx_handler(qp, qp->cb_data, entry->cb_data, 1777 entry->len); 1778 } 1779 1780 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q); 1781 } 1782 1783 static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset) 1784 { 1785 #ifdef ARCH_HAS_NOCACHE_UACCESS 1786 /* 1787 * Using non-temporal mov to improve performance on non-cached 1788 * writes, even though we aren't actually copying from user space. 1789 */ 1790 __copy_from_user_inatomic_nocache(offset, entry->buf, entry->len); 1791 #else 1792 memcpy_toio(offset, entry->buf, entry->len); 1793 #endif 1794 1795 /* Ensure that the data is fully copied out before setting the flags */ 1796 wmb(); 1797 1798 ntb_tx_copy_callback(entry, NULL); 1799 } 1800 1801 static int ntb_async_tx_submit(struct ntb_transport_qp *qp, 1802 struct ntb_queue_entry *entry) 1803 { 1804 struct dma_async_tx_descriptor *txd; 1805 struct dma_chan *chan = qp->tx_dma_chan; 1806 struct dma_device *device; 1807 size_t len = entry->len; 1808 void *buf = entry->buf; 1809 size_t dest_off, buff_off; 1810 struct dmaengine_unmap_data *unmap; 1811 dma_addr_t dest; 1812 dma_cookie_t cookie; 1813 1814 device = chan->device; 1815 dest = qp->tx_mw_dma_addr + qp->tx_max_frame * entry->tx_index; 1816 buff_off = (size_t)buf & ~PAGE_MASK; 1817 dest_off = (size_t)dest & ~PAGE_MASK; 1818 1819 if (!is_dma_copy_aligned(device, buff_off, dest_off, len)) 1820 goto err; 1821 1822 unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT); 1823 if (!unmap) 1824 goto err; 1825 1826 unmap->len = len; 1827 unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf), 1828 buff_off, len, DMA_TO_DEVICE); 1829 if (dma_mapping_error(device->dev, unmap->addr[0])) 1830 goto err_get_unmap; 1831 1832 unmap->to_cnt = 1; 1833 1834 txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len, 1835 DMA_PREP_INTERRUPT); 1836 if (!txd) 1837 goto err_get_unmap; 1838 1839 txd->callback_result = ntb_tx_copy_callback; 1840 txd->callback_param = entry; 1841 dma_set_unmap(txd, unmap); 1842 1843 cookie = dmaengine_submit(txd); 1844 if (dma_submit_error(cookie)) 1845 goto err_set_unmap; 1846 1847 dmaengine_unmap_put(unmap); 1848 1849 dma_async_issue_pending(chan); 1850 1851 return 0; 1852 err_set_unmap: 1853 dmaengine_unmap_put(unmap); 1854 err_get_unmap: 1855 dmaengine_unmap_put(unmap); 1856 err: 1857 return -ENXIO; 1858 } 1859 1860 static void ntb_async_tx(struct ntb_transport_qp *qp, 1861 struct ntb_queue_entry *entry) 1862 { 1863 struct ntb_payload_header __iomem *hdr; 1864 struct dma_chan *chan = qp->tx_dma_chan; 1865 void __iomem *offset; 1866 int res; 1867 1868 entry->tx_index = qp->tx_index; 1869 offset = qp->tx_mw + qp->tx_max_frame * entry->tx_index; 1870 hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header); 1871 entry->tx_hdr = hdr; 1872 1873 iowrite32(entry->len, &hdr->len); 1874 iowrite32((u32)qp->tx_pkts, &hdr->ver); 1875 1876 if (!chan) 1877 goto err; 1878 1879 if (entry->len < copy_bytes) 1880 goto err; 1881 1882 res = ntb_async_tx_submit(qp, entry); 1883 if (res < 0) 1884 goto err; 1885 1886 if (!entry->retries) 1887 qp->tx_async++; 1888 1889 return; 1890 1891 err: 1892 ntb_memcpy_tx(entry, offset); 1893 qp->tx_memcpy++; 1894 } 1895 1896 static int ntb_process_tx(struct ntb_transport_qp *qp, 1897 struct ntb_queue_entry *entry) 1898 { 1899 if (qp->tx_index == qp->remote_rx_info->entry) { 1900 qp->tx_ring_full++; 1901 return -EAGAIN; 1902 } 1903 1904 if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) { 1905 if (qp->tx_handler) 1906 qp->tx_handler(qp, qp->cb_data, NULL, -EIO); 1907 1908 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, 1909 &qp->tx_free_q); 1910 return 0; 1911 } 1912 1913 ntb_async_tx(qp, entry); 1914 1915 qp->tx_index++; 1916 qp->tx_index %= qp->tx_max_entry; 1917 1918 qp->tx_pkts++; 1919 1920 return 0; 1921 } 1922 1923 static void ntb_send_link_down(struct ntb_transport_qp *qp) 1924 { 1925 struct pci_dev *pdev = qp->ndev->pdev; 1926 struct ntb_queue_entry *entry; 1927 int i, rc; 1928 1929 if (!qp->link_is_up) 1930 return; 1931 1932 dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num); 1933 1934 for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) { 1935 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); 1936 if (entry) 1937 break; 1938 msleep(100); 1939 } 1940 1941 if (!entry) 1942 return; 1943 1944 entry->cb_data = NULL; 1945 entry->buf = NULL; 1946 entry->len = 0; 1947 entry->flags = LINK_DOWN_FLAG; 1948 1949 rc = ntb_process_tx(qp, entry); 1950 if (rc) 1951 dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n", 1952 qp->qp_num); 1953 1954 ntb_qp_link_down_reset(qp); 1955 } 1956 1957 static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node) 1958 { 1959 return dev_to_node(&chan->dev->device) == (int)(unsigned long)node; 1960 } 1961 1962 /** 1963 * ntb_transport_create_queue - Create a new NTB transport layer queue 1964 * @rx_handler: receive callback function 1965 * @tx_handler: transmit callback function 1966 * @event_handler: event callback function 1967 * 1968 * Create a new NTB transport layer queue and provide the queue with a callback 1969 * routine for both transmit and receive. The receive callback routine will be 1970 * used to pass up data when the transport has received it on the queue. The 1971 * transmit callback routine will be called when the transport has completed the 1972 * transmission of the data on the queue and the data is ready to be freed. 1973 * 1974 * RETURNS: pointer to newly created ntb_queue, NULL on error. 1975 */ 1976 struct ntb_transport_qp * 1977 ntb_transport_create_queue(void *data, struct device *client_dev, 1978 const struct ntb_queue_handlers *handlers) 1979 { 1980 struct ntb_dev *ndev; 1981 struct pci_dev *pdev; 1982 struct ntb_transport_ctx *nt; 1983 struct ntb_queue_entry *entry; 1984 struct ntb_transport_qp *qp; 1985 u64 qp_bit; 1986 unsigned int free_queue; 1987 dma_cap_mask_t dma_mask; 1988 int node; 1989 int i; 1990 1991 ndev = dev_ntb(client_dev->parent); 1992 pdev = ndev->pdev; 1993 nt = ndev->ctx; 1994 1995 node = dev_to_node(&ndev->dev); 1996 1997 free_queue = ffs(nt->qp_bitmap_free); 1998 if (!free_queue) 1999 goto err; 2000 2001 /* decrement free_queue to make it zero based */ 2002 free_queue--; 2003 2004 qp = &nt->qp_vec[free_queue]; 2005 qp_bit = BIT_ULL(qp->qp_num); 2006 2007 nt->qp_bitmap_free &= ~qp_bit; 2008 2009 qp->cb_data = data; 2010 qp->rx_handler = handlers->rx_handler; 2011 qp->tx_handler = handlers->tx_handler; 2012 qp->event_handler = handlers->event_handler; 2013 2014 dma_cap_zero(dma_mask); 2015 dma_cap_set(DMA_MEMCPY, dma_mask); 2016 2017 if (use_dma) { 2018 qp->tx_dma_chan = 2019 dma_request_channel(dma_mask, ntb_dma_filter_fn, 2020 (void *)(unsigned long)node); 2021 if (!qp->tx_dma_chan) 2022 dev_info(&pdev->dev, "Unable to allocate TX DMA channel\n"); 2023 2024 qp->rx_dma_chan = 2025 dma_request_channel(dma_mask, ntb_dma_filter_fn, 2026 (void *)(unsigned long)node); 2027 if (!qp->rx_dma_chan) 2028 dev_info(&pdev->dev, "Unable to allocate RX DMA channel\n"); 2029 } else { 2030 qp->tx_dma_chan = NULL; 2031 qp->rx_dma_chan = NULL; 2032 } 2033 2034 qp->tx_mw_dma_addr = 0; 2035 if (qp->tx_dma_chan) { 2036 qp->tx_mw_dma_addr = 2037 dma_map_resource(qp->tx_dma_chan->device->dev, 2038 qp->tx_mw_phys, qp->tx_mw_size, 2039 DMA_FROM_DEVICE, 0); 2040 if (dma_mapping_error(qp->tx_dma_chan->device->dev, 2041 qp->tx_mw_dma_addr)) { 2042 qp->tx_mw_dma_addr = 0; 2043 goto err1; 2044 } 2045 } 2046 2047 dev_dbg(&pdev->dev, "Using %s memcpy for TX\n", 2048 qp->tx_dma_chan ? "DMA" : "CPU"); 2049 2050 dev_dbg(&pdev->dev, "Using %s memcpy for RX\n", 2051 qp->rx_dma_chan ? "DMA" : "CPU"); 2052 2053 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { 2054 entry = kzalloc_node(sizeof(*entry), GFP_KERNEL, node); 2055 if (!entry) 2056 goto err1; 2057 2058 entry->qp = qp; 2059 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, 2060 &qp->rx_free_q); 2061 } 2062 qp->rx_alloc_entry = NTB_QP_DEF_NUM_ENTRIES; 2063 2064 for (i = 0; i < qp->tx_max_entry; i++) { 2065 entry = kzalloc_node(sizeof(*entry), GFP_KERNEL, node); 2066 if (!entry) 2067 goto err2; 2068 2069 entry->qp = qp; 2070 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, 2071 &qp->tx_free_q); 2072 } 2073 2074 ntb_db_clear(qp->ndev, qp_bit); 2075 ntb_db_clear_mask(qp->ndev, qp_bit); 2076 2077 dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num); 2078 2079 return qp; 2080 2081 err2: 2082 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) 2083 kfree(entry); 2084 err1: 2085 qp->rx_alloc_entry = 0; 2086 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) 2087 kfree(entry); 2088 if (qp->tx_mw_dma_addr) 2089 dma_unmap_resource(qp->tx_dma_chan->device->dev, 2090 qp->tx_mw_dma_addr, qp->tx_mw_size, 2091 DMA_FROM_DEVICE, 0); 2092 if (qp->tx_dma_chan) 2093 dma_release_channel(qp->tx_dma_chan); 2094 if (qp->rx_dma_chan) 2095 dma_release_channel(qp->rx_dma_chan); 2096 nt->qp_bitmap_free |= qp_bit; 2097 err: 2098 return NULL; 2099 } 2100 EXPORT_SYMBOL_GPL(ntb_transport_create_queue); 2101 2102 /** 2103 * ntb_transport_free_queue - Frees NTB transport queue 2104 * @qp: NTB queue to be freed 2105 * 2106 * Frees NTB transport queue 2107 */ 2108 void ntb_transport_free_queue(struct ntb_transport_qp *qp) 2109 { 2110 struct pci_dev *pdev; 2111 struct ntb_queue_entry *entry; 2112 u64 qp_bit; 2113 2114 if (!qp) 2115 return; 2116 2117 pdev = qp->ndev->pdev; 2118 2119 qp->active = false; 2120 2121 if (qp->tx_dma_chan) { 2122 struct dma_chan *chan = qp->tx_dma_chan; 2123 /* Putting the dma_chan to NULL will force any new traffic to be 2124 * processed by the CPU instead of the DAM engine 2125 */ 2126 qp->tx_dma_chan = NULL; 2127 2128 /* Try to be nice and wait for any queued DMA engine 2129 * transactions to process before smashing it with a rock 2130 */ 2131 dma_sync_wait(chan, qp->last_cookie); 2132 dmaengine_terminate_all(chan); 2133 2134 dma_unmap_resource(chan->device->dev, 2135 qp->tx_mw_dma_addr, qp->tx_mw_size, 2136 DMA_FROM_DEVICE, 0); 2137 2138 dma_release_channel(chan); 2139 } 2140 2141 if (qp->rx_dma_chan) { 2142 struct dma_chan *chan = qp->rx_dma_chan; 2143 /* Putting the dma_chan to NULL will force any new traffic to be 2144 * processed by the CPU instead of the DAM engine 2145 */ 2146 qp->rx_dma_chan = NULL; 2147 2148 /* Try to be nice and wait for any queued DMA engine 2149 * transactions to process before smashing it with a rock 2150 */ 2151 dma_sync_wait(chan, qp->last_cookie); 2152 dmaengine_terminate_all(chan); 2153 dma_release_channel(chan); 2154 } 2155 2156 qp_bit = BIT_ULL(qp->qp_num); 2157 2158 ntb_db_set_mask(qp->ndev, qp_bit); 2159 tasklet_kill(&qp->rxc_db_work); 2160 2161 cancel_delayed_work_sync(&qp->link_work); 2162 2163 qp->cb_data = NULL; 2164 qp->rx_handler = NULL; 2165 qp->tx_handler = NULL; 2166 qp->event_handler = NULL; 2167 2168 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) 2169 kfree(entry); 2170 2171 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) { 2172 dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n"); 2173 kfree(entry); 2174 } 2175 2176 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) { 2177 dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n"); 2178 kfree(entry); 2179 } 2180 2181 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) 2182 kfree(entry); 2183 2184 qp->transport->qp_bitmap_free |= qp_bit; 2185 2186 dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num); 2187 } 2188 EXPORT_SYMBOL_GPL(ntb_transport_free_queue); 2189 2190 /** 2191 * ntb_transport_rx_remove - Dequeues enqueued rx packet 2192 * @qp: NTB queue to be freed 2193 * @len: pointer to variable to write enqueued buffers length 2194 * 2195 * Dequeues unused buffers from receive queue. Should only be used during 2196 * shutdown of qp. 2197 * 2198 * RETURNS: NULL error value on error, or void* for success. 2199 */ 2200 void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len) 2201 { 2202 struct ntb_queue_entry *entry; 2203 void *buf; 2204 2205 if (!qp || qp->client_ready) 2206 return NULL; 2207 2208 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q); 2209 if (!entry) 2210 return NULL; 2211 2212 buf = entry->cb_data; 2213 *len = entry->len; 2214 2215 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q); 2216 2217 return buf; 2218 } 2219 EXPORT_SYMBOL_GPL(ntb_transport_rx_remove); 2220 2221 /** 2222 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry 2223 * @qp: NTB transport layer queue the entry is to be enqueued on 2224 * @cb: per buffer pointer for callback function to use 2225 * @data: pointer to data buffer that incoming packets will be copied into 2226 * @len: length of the data buffer 2227 * 2228 * Enqueue a new receive buffer onto the transport queue into which a NTB 2229 * payload can be received into. 2230 * 2231 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 2232 */ 2233 int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, 2234 unsigned int len) 2235 { 2236 struct ntb_queue_entry *entry; 2237 2238 if (!qp) 2239 return -EINVAL; 2240 2241 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q); 2242 if (!entry) 2243 return -ENOMEM; 2244 2245 entry->cb_data = cb; 2246 entry->buf = data; 2247 entry->len = len; 2248 entry->flags = 0; 2249 entry->retries = 0; 2250 entry->errors = 0; 2251 entry->rx_index = 0; 2252 2253 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q); 2254 2255 if (qp->active) 2256 tasklet_schedule(&qp->rxc_db_work); 2257 2258 return 0; 2259 } 2260 EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue); 2261 2262 /** 2263 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry 2264 * @qp: NTB transport layer queue the entry is to be enqueued on 2265 * @cb: per buffer pointer for callback function to use 2266 * @data: pointer to data buffer that will be sent 2267 * @len: length of the data buffer 2268 * 2269 * Enqueue a new transmit buffer onto the transport queue from which a NTB 2270 * payload will be transmitted. This assumes that a lock is being held to 2271 * serialize access to the qp. 2272 * 2273 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 2274 */ 2275 int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, 2276 unsigned int len) 2277 { 2278 struct ntb_queue_entry *entry; 2279 int rc; 2280 2281 if (!qp || !qp->link_is_up || !len) 2282 return -EINVAL; 2283 2284 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); 2285 if (!entry) { 2286 qp->tx_err_no_buf++; 2287 return -EBUSY; 2288 } 2289 2290 entry->cb_data = cb; 2291 entry->buf = data; 2292 entry->len = len; 2293 entry->flags = 0; 2294 entry->errors = 0; 2295 entry->retries = 0; 2296 entry->tx_index = 0; 2297 2298 rc = ntb_process_tx(qp, entry); 2299 if (rc) 2300 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, 2301 &qp->tx_free_q); 2302 2303 return rc; 2304 } 2305 EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue); 2306 2307 /** 2308 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue 2309 * @qp: NTB transport layer queue to be enabled 2310 * 2311 * Notify NTB transport layer of client readiness to use queue 2312 */ 2313 void ntb_transport_link_up(struct ntb_transport_qp *qp) 2314 { 2315 if (!qp) 2316 return; 2317 2318 qp->client_ready = true; 2319 2320 if (qp->transport->link_is_up) 2321 schedule_delayed_work(&qp->link_work, 0); 2322 } 2323 EXPORT_SYMBOL_GPL(ntb_transport_link_up); 2324 2325 /** 2326 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data 2327 * @qp: NTB transport layer queue to be disabled 2328 * 2329 * Notify NTB transport layer of client's desire to no longer receive data on 2330 * transport queue specified. It is the client's responsibility to ensure all 2331 * entries on queue are purged or otherwise handled appropriately. 2332 */ 2333 void ntb_transport_link_down(struct ntb_transport_qp *qp) 2334 { 2335 int val; 2336 2337 if (!qp) 2338 return; 2339 2340 qp->client_ready = false; 2341 2342 val = ntb_spad_read(qp->ndev, QP_LINKS); 2343 2344 ntb_peer_spad_write(qp->ndev, PIDX, QP_LINKS, val & ~BIT(qp->qp_num)); 2345 2346 if (qp->link_is_up) 2347 ntb_send_link_down(qp); 2348 else 2349 cancel_delayed_work_sync(&qp->link_work); 2350 } 2351 EXPORT_SYMBOL_GPL(ntb_transport_link_down); 2352 2353 /** 2354 * ntb_transport_link_query - Query transport link state 2355 * @qp: NTB transport layer queue to be queried 2356 * 2357 * Query connectivity to the remote system of the NTB transport queue 2358 * 2359 * RETURNS: true for link up or false for link down 2360 */ 2361 bool ntb_transport_link_query(struct ntb_transport_qp *qp) 2362 { 2363 if (!qp) 2364 return false; 2365 2366 return qp->link_is_up; 2367 } 2368 EXPORT_SYMBOL_GPL(ntb_transport_link_query); 2369 2370 /** 2371 * ntb_transport_qp_num - Query the qp number 2372 * @qp: NTB transport layer queue to be queried 2373 * 2374 * Query qp number of the NTB transport queue 2375 * 2376 * RETURNS: a zero based number specifying the qp number 2377 */ 2378 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp) 2379 { 2380 if (!qp) 2381 return 0; 2382 2383 return qp->qp_num; 2384 } 2385 EXPORT_SYMBOL_GPL(ntb_transport_qp_num); 2386 2387 /** 2388 * ntb_transport_max_size - Query the max payload size of a qp 2389 * @qp: NTB transport layer queue to be queried 2390 * 2391 * Query the maximum payload size permissible on the given qp 2392 * 2393 * RETURNS: the max payload size of a qp 2394 */ 2395 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp) 2396 { 2397 unsigned int max_size; 2398 unsigned int copy_align; 2399 struct dma_chan *rx_chan, *tx_chan; 2400 2401 if (!qp) 2402 return 0; 2403 2404 rx_chan = qp->rx_dma_chan; 2405 tx_chan = qp->tx_dma_chan; 2406 2407 copy_align = max(rx_chan ? rx_chan->device->copy_align : 0, 2408 tx_chan ? tx_chan->device->copy_align : 0); 2409 2410 /* If DMA engine usage is possible, try to find the max size for that */ 2411 max_size = qp->tx_max_frame - sizeof(struct ntb_payload_header); 2412 max_size = round_down(max_size, 1 << copy_align); 2413 2414 return max_size; 2415 } 2416 EXPORT_SYMBOL_GPL(ntb_transport_max_size); 2417 2418 unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp) 2419 { 2420 unsigned int head = qp->tx_index; 2421 unsigned int tail = qp->remote_rx_info->entry; 2422 2423 return tail > head ? tail - head : qp->tx_max_entry + tail - head; 2424 } 2425 EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry); 2426 2427 static void ntb_transport_doorbell_callback(void *data, int vector) 2428 { 2429 struct ntb_transport_ctx *nt = data; 2430 struct ntb_transport_qp *qp; 2431 u64 db_bits; 2432 unsigned int qp_num; 2433 2434 if (ntb_db_read(nt->ndev) & nt->msi_db_mask) { 2435 ntb_transport_msi_peer_desc_changed(nt); 2436 ntb_db_clear(nt->ndev, nt->msi_db_mask); 2437 } 2438 2439 db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free & 2440 ntb_db_vector_mask(nt->ndev, vector)); 2441 2442 while (db_bits) { 2443 qp_num = __ffs(db_bits); 2444 qp = &nt->qp_vec[qp_num]; 2445 2446 if (qp->active) 2447 tasklet_schedule(&qp->rxc_db_work); 2448 2449 db_bits &= ~BIT_ULL(qp_num); 2450 } 2451 } 2452 2453 static const struct ntb_ctx_ops ntb_transport_ops = { 2454 .link_event = ntb_transport_event_callback, 2455 .db_event = ntb_transport_doorbell_callback, 2456 }; 2457 2458 static struct ntb_client ntb_transport_client = { 2459 .ops = { 2460 .probe = ntb_transport_probe, 2461 .remove = ntb_transport_free, 2462 }, 2463 }; 2464 2465 static int __init ntb_transport_init(void) 2466 { 2467 int rc; 2468 2469 pr_info("%s, version %s\n", NTB_TRANSPORT_DESC, NTB_TRANSPORT_VER); 2470 2471 if (debugfs_initialized()) 2472 nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); 2473 2474 rc = bus_register(&ntb_transport_bus); 2475 if (rc) 2476 goto err_bus; 2477 2478 rc = ntb_register_client(&ntb_transport_client); 2479 if (rc) 2480 goto err_client; 2481 2482 return 0; 2483 2484 err_client: 2485 bus_unregister(&ntb_transport_bus); 2486 err_bus: 2487 debugfs_remove_recursive(nt_debugfs_dir); 2488 return rc; 2489 } 2490 module_init(ntb_transport_init); 2491 2492 static void __exit ntb_transport_exit(void) 2493 { 2494 ntb_unregister_client(&ntb_transport_client); 2495 bus_unregister(&ntb_transport_bus); 2496 debugfs_remove_recursive(nt_debugfs_dir); 2497 } 2498 module_exit(ntb_transport_exit); 2499