/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Transport Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "linux/ntb.h"
#include "linux/ntb_transport.h"

#define NTB_TRANSPORT_VERSION	4
#define NTB_TRANSPORT_VER	"4"
#define NTB_TRANSPORT_NAME	"ntb_transport"
#define NTB_TRANSPORT_DESC	"Software Queue-Pair Transport over NTB"
#define NTB_TRANSPORT_MIN_SPADS (MW0_SZ_HIGH + 2)

MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
MODULE_VERSION(NTB_TRANSPORT_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");

static unsigned int transport_mtu = 0x10000;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");

static bool use_dma;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");

static bool use_msi;
#ifdef CONFIG_NTB_MSI
module_param(use_msi, bool, 0644);
MODULE_PARM_DESC(use_msi, "Use MSI interrupts instead of doorbells");
#endif

static struct dentry *nt_debugfs_dir;

/* Only two-port NTB devices are supported */
#define PIDX		NTB_DEF_PEER_IDX

struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;
	int retries;
	int errors;
	unsigned int tx_index;
	unsigned int rx_index;

	struct ntb_transport_qp *qp;
	union {
		struct ntb_payload_header __iomem *tx_hdr;
		struct ntb_payload_header *rx_hdr;
	};
};

struct ntb_rx_info {
	unsigned int entry;
};

struct ntb_transport_qp {
	struct ntb_transport_ctx *transport;
	struct ntb_dev *ndev;
	void *cb_data;
	struct dma_chan *tx_dma_chan;
	struct dma_chan *rx_dma_chan;

	bool client_ready;
	bool link_is_up;
	bool active;

	u8 qp_num;	/* Only 64 QPs are allowed.  0-63 */
	u64 qp_bit;

	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void __iomem *tx_mw;
	phys_addr_t tx_mw_phys;
	size_t tx_mw_size;
	dma_addr_t tx_mw_dma_addr;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head rx_post_q;
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	/* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
	spinlock_t ntb_rx_q_lock;
	void *rx_buff;
	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;
	unsigned int rx_alloc_entry;
	dma_cookie_t last_cookie;
	struct tasklet_struct rxc_db_work;

	void (*event_handler)(void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 rx_memcpy;
	u64 rx_async;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
	u64 tx_err_no_buf;
	u64 tx_memcpy;
	u64 tx_async;

	bool use_msi;
	int msi_irq;
	struct ntb_msi_desc msi_desc;
	struct ntb_msi_desc peer_msi_desc;
};

struct ntb_transport_mw {
	phys_addr_t phys_addr;
	resource_size_t phys_size;
	void __iomem *vbase;
	size_t xlat_size;
	size_t buff_size;
	size_t alloc_size;
	void *alloc_addr;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
	struct list_head entry;
	struct ntb_transport_ctx *nt;
	struct device dev;
};

struct ntb_transport_ctx {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_dev *ndev;

	struct ntb_transport_mw *mw_vec;
	struct ntb_transport_qp *qp_vec;
	unsigned int mw_count;
	unsigned int qp_count;
	u64 qp_bitmap;
	u64 qp_bitmap_free;

	bool use_msi;
	unsigned int msi_spad_offset;
	u64 msi_db_mask;

	bool link_is_up;
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_node_dir;
};

enum {
	DESC_DONE_FLAG = BIT(0),
	LINK_DOWN_FLAG = BIT(1),
};

struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};

enum {
	VERSION = 0,
	QP_LINKS,
	NUM_QPS,
	NUM_MWS,
	MW0_SZ_HIGH,
	MW0_SZ_LOW,
};

#define dev_client_dev(__dev) \
	container_of((__dev), struct ntb_transport_client_dev, dev)

#define drv_client(__drv) \
	container_of((__drv), struct ntb_transport_client, driver)

#define QP_TO_MW(nt, qp)	((qp) % nt->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10

static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client;
static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
			       struct ntb_queue_entry *entry);
static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset);
static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset);
static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset);

static int ntb_transport_bus_match(struct device *dev,
				   struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_transport_bus_probe(struct device *dev)
{
	const struct ntb_transport_client *client;
	int rc;

	get_device(dev);

	client = drv_client(dev->driver);
	rc = client->probe(dev);
	if (rc)
		put_device(dev);

	return rc;
}

static void ntb_transport_bus_remove(struct device *dev)
{
	const struct ntb_transport_client *client;

	client = drv_client(dev->driver);
	client->remove(dev);

	put_device(dev);
}

static struct bus_type ntb_transport_bus = {
	.name = "ntb_transport",
	.match = ntb_transport_bus_match,
	.probe = ntb_transport_bus_probe,
	.remove = ntb_transport_bus_remove,
};

static LIST_HEAD(ntb_transport_list);

static int ntb_bus_init(struct ntb_transport_ctx *nt)
{
	list_add_tail(&nt->entry, &ntb_transport_list);
	return 0;
}

static void ntb_bus_remove(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);
}

static void ntb_transport_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;

	client_dev = dev_client_dev(dev);
	kfree(client_dev);
}

/**
 * ntb_transport_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_transport_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport_ctx *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);

/**
 * ntb_transport_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_transport_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport_ctx *nt;
	int node;
	int rc, i = 0;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	list_for_each_entry(nt, &ntb_transport_list, entry) {
		struct device *dev;

		node = dev_to_node(&nt->ndev->dev);

		client_dev = kzalloc_node(sizeof(*client_dev),
					  GFP_KERNEL, node);
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s%d", device_name, i);
		dev->bus = &ntb_transport_bus;
		dev->release = ntb_transport_client_release;
		dev->parent = &nt->ndev->dev;

		rc = device_register(dev);
		if (rc) {
			put_device(dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
		i++;
	}

	return 0;

err:
	ntb_transport_unregister_client_dev(device_name);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);

/**
 * ntb_transport_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_register_client(struct ntb_transport_client *drv)
{
	drv->driver.bus = &ntb_transport_bus;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client);

/**
 * ntb_transport_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
void ntb_transport_unregister_client(struct ntb_transport_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);
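
/*
 * Example (illustrative sketch only, not part of this driver): a transport
 * client such as ntb_netdev typically registers a named client device on
 * every transport instance and then registers its driver, roughly:
 *
 *	static struct ntb_transport_client my_client = {
 *		.driver.name	= KBUILD_MODNAME,
 *		.driver.owner	= THIS_MODULE,
 *		.probe		= my_probe,	// hypothetical callbacks
 *		.remove		= my_remove,
 *	};
 *
 *	rc = ntb_transport_register_client_dev(KBUILD_MODNAME);
 *	if (rc)
 *		return rc;
 *	rc = ntb_transport_register_client(&my_client);
 *	if (rc)
 *		ntb_transport_unregister_client_dev(KBUILD_MODNAME);
 */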

static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char *buf;
	ssize_t ret, out_offset, out_count;

	qp = filp->private_data;

	if (!qp || !qp->link_is_up)
		return 0;

	out_count = 1000;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	out_offset = 0;
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"\nNTB QP stats:\n\n");
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"rx_memcpy - \t%llu\n", qp->rx_memcpy);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"rx_async - \t%llu\n", qp->rx_async);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"rx_buff - \t0x%p\n", qp->rx_buff);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"rx_index - \t%u\n", qp->rx_index);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"rx_max_entry - \t%u\n", qp->rx_max_entry);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"rx_alloc_entry - \t%u\n\n", qp->rx_alloc_entry);

	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"tx_memcpy - \t%llu\n", qp->tx_memcpy);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"tx_async - \t%llu\n", qp->tx_async);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"tx_mw - \t0x%p\n", qp->tx_mw);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"tx_index (H) - \t%u\n", qp->tx_index);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"RRI (T) - \t%u\n",
				qp->remote_rx_info->entry);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"tx_max_entry - \t%u\n", qp->tx_max_entry);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"free tx - \t%u\n",
				ntb_transport_tx_free_entry(qp));

	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"\n");
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Using TX DMA - \t%s\n",
				qp->tx_dma_chan ? "Yes" : "No");
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Using RX DMA - \t%s\n",
				qp->rx_dma_chan ? "Yes" : "No");
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"QP Link - \t%s\n",
				qp->link_is_up ? "Up" : "Down");
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"\n");

	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_read,
};

static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (list_empty(list)) {
		entry = NULL;
		goto out;
	}
	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_del(&entry->entry);

out:
	spin_unlock_irqrestore(lock, flags);

	return entry;
}

static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
					   struct list_head *list,
					   struct list_head *to_list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);

	if (list_empty(list)) {
		entry = NULL;
	} else {
		entry = list_first_entry(list, struct ntb_queue_entry, entry);
		list_move_tail(&entry->entry, to_list);
	}

	spin_unlock_irqrestore(lock, flags);

	return entry;
}
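
/*
 * Per-queue ring layout (summary of the setup below, for orientation):
 * each memory window is split evenly between the queue pairs assigned to
 * it (QP_TO_MW()).  A queue's share is a ring of rx_max_entry frames of
 * rx_max_frame bytes; a struct ntb_payload_header (ver/len/flags) sits at
 * the end of every frame, and a struct ntb_rx_info at the end of the
 * region carries the consumer index back to the transmitting side, which
 * uses it to compute the number of free TX entries.
 */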
static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
				     unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
	struct ntb_transport_mw *mw;
	struct ntb_dev *ndev = nt->ndev;
	struct ntb_queue_entry *entry;
	unsigned int rx_size, num_qps_mw;
	unsigned int mw_num, mw_count, qp_count;
	unsigned int i;
	int node;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	if (!mw->virt_addr)
		return -ENOMEM;

	if (mw_num < qp_count % mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
	qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
	rx_size -= sizeof(struct ntb_rx_info);

	qp->remote_rx_info = qp->rx_buff + rx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;

	/*
	 * Checking to see if we have more entries than the default.
	 * We should add additional entries if that is the case so we
	 * can be in sync with the transport frames.
	 */
	node = dev_to_node(&ndev->dev);
	for (i = qp->rx_alloc_entry; i < qp->rx_max_entry; i++) {
		entry = kzalloc_node(sizeof(*entry), GFP_KERNEL, node);
		if (!entry)
			return -ENOMEM;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
			     &qp->rx_free_q);
		qp->rx_alloc_entry++;
	}

	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* setup the hdr offsets with 0s */
	for (i = 0; i < qp->rx_max_entry; i++) {
		void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
				sizeof(struct ntb_payload_header));
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
	qp->tx_index = 0;

	return 0;
}

static irqreturn_t ntb_transport_isr(int irq, void *dev)
{
	struct ntb_transport_qp *qp = dev;

	tasklet_schedule(&qp->rxc_db_work);

	return IRQ_HANDLED;
}

static void ntb_transport_setup_qp_peer_msi(struct ntb_transport_ctx *nt,
					    unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
	int spad = qp_num * 2 + nt->msi_spad_offset;

	if (!nt->use_msi)
		return;

	if (spad >= ntb_spad_count(nt->ndev))
		return;

	qp->peer_msi_desc.addr_offset =
		ntb_peer_spad_read(qp->ndev, PIDX, spad);
	qp->peer_msi_desc.data =
		ntb_peer_spad_read(qp->ndev, PIDX, spad + 1);

	dev_dbg(&qp->ndev->pdev->dev, "QP%d Peer MSI addr=%x data=%x\n",
		qp_num, qp->peer_msi_desc.addr_offset, qp->peer_msi_desc.data);

	if (qp->peer_msi_desc.addr_offset) {
		qp->use_msi = true;
		dev_info(&qp->ndev->pdev->dev,
			 "Using MSI interrupts for QP%d\n", qp_num);
	}
}

static void ntb_transport_setup_qp_msi(struct ntb_transport_ctx *nt,
				       unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
	int spad = qp_num * 2 + nt->msi_spad_offset;
	int rc;

	if (!nt->use_msi)
		return;

	if (spad >= ntb_spad_count(nt->ndev)) {
		dev_warn_once(&qp->ndev->pdev->dev,
			      "Not enough SPADS to use MSI interrupts\n");
		return;
	}

	ntb_spad_write(qp->ndev, spad, 0);
	ntb_spad_write(qp->ndev, spad + 1, 0);

	if (!qp->msi_irq) {
		qp->msi_irq = ntbm_msi_request_irq(qp->ndev, ntb_transport_isr,
						   KBUILD_MODNAME, qp,
						   &qp->msi_desc);
		if (qp->msi_irq < 0) {
			dev_warn(&qp->ndev->pdev->dev,
				 "Unable to allocate MSI interrupt for qp%d\n",
				 qp_num);
			return;
		}
	}

	rc = ntb_spad_write(qp->ndev, spad, qp->msi_desc.addr_offset);
	if (rc)
		goto err_free_interrupt;

	rc = ntb_spad_write(qp->ndev, spad + 1, qp->msi_desc.data);
	if (rc)
		goto err_free_interrupt;

	dev_dbg(&qp->ndev->pdev->dev, "QP%d MSI %d addr=%x data=%x\n",
		qp_num, qp->msi_irq, qp->msi_desc.addr_offset,
		qp->msi_desc.data);

	return;

err_free_interrupt:
	devm_free_irq(&nt->ndev->dev, qp->msi_irq, qp);
}

static void ntb_transport_msi_peer_desc_changed(struct ntb_transport_ctx *nt)
{
	int i;

	dev_dbg(&nt->ndev->pdev->dev, "Peer MSI descriptors changed");

	for (i = 0; i < nt->qp_count; i++)
		ntb_transport_setup_qp_peer_msi(nt, i);
}

static void ntb_transport_msi_desc_changed(void *data)
{
	struct ntb_transport_ctx *nt = data;
	int i;

	dev_dbg(&nt->ndev->pdev->dev, "MSI descriptors changed");

	for (i = 0; i < nt->qp_count; i++)
		ntb_transport_setup_qp_msi(nt, i);

	ntb_peer_db_set(nt->ndev, nt->msi_db_mask);
}

static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;

	if (!mw->virt_addr)
		return;

	ntb_mw_clear_trans(nt->ndev, PIDX, num_mw);
	dma_free_coherent(&pdev->dev, mw->alloc_size,
			  mw->alloc_addr, mw->dma_addr);
	mw->xlat_size = 0;
	mw->buff_size = 0;
	mw->alloc_size = 0;
	mw->alloc_addr = NULL;
	mw->virt_addr = NULL;
}

static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw,
			       struct device *dma_dev, size_t align)
{
	dma_addr_t dma_addr;
	void *alloc_addr, *virt_addr;
	int rc;

	alloc_addr = dma_alloc_coherent(dma_dev, mw->alloc_size,
					&dma_addr, GFP_KERNEL);
	if (!alloc_addr) {
		dev_err(dma_dev, "Unable to alloc MW buff of size %zu\n",
			mw->alloc_size);
		return -ENOMEM;
	}
	virt_addr = alloc_addr;

	/*
	 * we must ensure that the memory address allocated is BAR size
	 * aligned in order for the XLAT register to take the value. This
	 * is a requirement of the hardware. It is recommended to setup CMA
	 * for BAR sizes equal or greater than 4MB.
	 */
	if (!IS_ALIGNED(dma_addr, align)) {
		if (mw->alloc_size > mw->buff_size) {
			virt_addr = PTR_ALIGN(alloc_addr, align);
			dma_addr = ALIGN(dma_addr, align);
		} else {
			rc = -ENOMEM;
			goto err;
		}
	}

	mw->alloc_addr = alloc_addr;
	mw->virt_addr = virt_addr;
	mw->dma_addr = dma_addr;

	return 0;

err:
	dma_free_coherent(dma_dev, mw->alloc_size, alloc_addr, dma_addr);

	return rc;
}

static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
		      resource_size_t size)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;
	size_t xlat_size, buff_size;
	resource_size_t xlat_align;
	resource_size_t xlat_align_size;
	int rc;

	if (!size)
		return -EINVAL;

	rc = ntb_mw_get_align(nt->ndev, PIDX, num_mw, &xlat_align,
			      &xlat_align_size, NULL);
	if (rc)
		return rc;

	xlat_size = round_up(size, xlat_align_size);
	buff_size = round_up(size, xlat_align);

	/* No need to re-setup */
	if (mw->xlat_size == xlat_size)
		return 0;

	if (mw->buff_size)
		ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be aligned */
	mw->xlat_size = xlat_size;
	mw->buff_size = buff_size;
	mw->alloc_size = buff_size;

	rc = ntb_alloc_mw_buffer(mw, &pdev->dev, xlat_align);
	if (rc) {
		mw->alloc_size *= 2;
		rc = ntb_alloc_mw_buffer(mw, &pdev->dev, xlat_align);
		if (rc) {
			dev_err(&pdev->dev,
				"Unable to alloc aligned MW buff\n");
			mw->xlat_size = 0;
			mw->buff_size = 0;
			mw->alloc_size = 0;
			return rc;
		}
	}

	/* Notify HW the memory location of the receive buffer */
	rc = ntb_mw_set_trans(nt->ndev, PIDX, num_mw, mw->dma_addr,
			      mw->xlat_size);
	if (rc) {
		dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw);
		ntb_free_mw(nt, num_mw);
		return -EIO;
	}

	return 0;
}

static void ntb_qp_link_context_reset(struct ntb_transport_qp *qp)
{
	qp->link_is_up = false;
	qp->active = false;

	qp->tx_index = 0;
	qp->rx_index = 0;
	qp->rx_bytes = 0;
	qp->rx_pkts = 0;
	qp->rx_ring_empty = 0;
	qp->rx_err_no_buf = 0;
	qp->rx_err_oflow = 0;
	qp->rx_err_ver = 0;
	qp->rx_memcpy = 0;
	qp->rx_async = 0;
	qp->tx_bytes = 0;
	qp->tx_pkts = 0;
	qp->tx_ring_full = 0;
	qp->tx_err_no_buf = 0;
	qp->tx_memcpy = 0;
	qp->tx_async = 0;
}

static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
{
	ntb_qp_link_context_reset(qp);
	if (qp->remote_rx_info)
		qp->remote_rx_info->entry = qp->rx_max_entry - 1;
}

static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
	struct ntb_transport_ctx *nt = qp->transport;
	struct pci_dev *pdev = nt->ndev->pdev;

	dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);

	cancel_delayed_work_sync(&qp->link_work);
	ntb_qp_link_down_reset(qp);

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, qp->link_is_up);
}

static void ntb_qp_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_cleanup);
	struct ntb_transport_ctx *nt = qp->transport;

	ntb_qp_link_cleanup(qp);

	if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	schedule_work(&qp->link_cleanup);
}

static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_qp *qp;
	u64 qp_bitmap_alloc;
	unsigned int i, count;

	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->qp_count; i++)
		if (qp_bitmap_alloc & BIT_ULL(i)) {
			qp = &nt->qp_vec[i];
			ntb_qp_link_cleanup(qp);
			cancel_work_sync(&qp->link_cleanup);
			cancel_delayed_work_sync(&qp->link_work);
		}

	if (!nt->link_is_up)
		cancel_delayed_work_sync(&nt->link_work);

	for (i = 0; i < nt->mw_count; i++)
		ntb_free_mw(nt, i);

	/* The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	count = ntb_spad_count(nt->ndev);
	for (i = 0; i < count; i++)
		ntb_spad_write(nt->ndev, i, 0);
}

static void ntb_transport_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_cleanup);

	ntb_transport_link_cleanup(nt);
}
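
/*
 * Link negotiation overview (describes ntb_transport_link_work() below):
 * when the NTB link comes up, each side publishes its memory window sizes
 * (MW0_SZ_HIGH/LOW per window), NUM_MWS, NUM_QPS and VERSION to the peer's
 * scratchpads, written in the opposite order of the read-back.  It then
 * reads the values the peer published locally; only if VERSION, NUM_QPS
 * and NUM_MWS all match are the memory windows programmed and the per-QP
 * link work scheduled, which advertises readiness via the QP_LINKS bits.
 */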

static void ntb_transport_event_callback(void *data)
{
	struct ntb_transport_ctx *nt = data;

	if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work, 0);
	else
		schedule_work(&nt->link_cleanup);
}

static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_work.work);
	struct ntb_dev *ndev = nt->ndev;
	struct pci_dev *pdev = ndev->pdev;
	resource_size_t size;
	u32 val;
	int rc = 0, i, spad;

	/* send the local info, in the opposite order of the way we read it */

	if (nt->use_msi) {
		rc = ntb_msi_setup_mws(ndev);
		if (rc) {
			dev_warn(&pdev->dev,
				 "Failed to register MSI memory window: %d\n",
				 rc);
			nt->use_msi = false;
		}
	}

	for (i = 0; i < nt->qp_count; i++)
		ntb_transport_setup_qp_msi(nt, i);

	for (i = 0; i < nt->mw_count; i++) {
		size = nt->mw_vec[i].phys_size;

		if (max_mw_size && size > max_mw_size)
			size = max_mw_size;

		spad = MW0_SZ_HIGH + (i * 2);
		ntb_peer_spad_write(ndev, PIDX, spad, upper_32_bits(size));

		spad = MW0_SZ_LOW + (i * 2);
		ntb_peer_spad_write(ndev, PIDX, spad, lower_32_bits(size));
	}

	ntb_peer_spad_write(ndev, PIDX, NUM_MWS, nt->mw_count);

	ntb_peer_spad_write(ndev, PIDX, NUM_QPS, nt->qp_count);

	ntb_peer_spad_write(ndev, PIDX, VERSION, NTB_TRANSPORT_VERSION);

	/* Query the remote side for its info */
	val = ntb_spad_read(ndev, VERSION);
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);
	if (val != NTB_TRANSPORT_VERSION)
		goto out;

	val = ntb_spad_read(ndev, NUM_QPS);
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
	if (val != nt->qp_count)
		goto out;

	val = ntb_spad_read(ndev, NUM_MWS);
	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
	if (val != nt->mw_count)
		goto out;

	for (i = 0; i < nt->mw_count; i++) {
		u64 val64;

		val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
		val64 = (u64)val << 32;

		val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2));
		val64 |= val;

		dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);

		rc = ntb_set_mw(nt, i, val64);
		if (rc)
			goto out1;
	}

	nt->link_is_up = true;

	for (i = 0; i < nt->qp_count; i++) {
		struct ntb_transport_qp *qp = &nt->qp_vec[i];

		ntb_transport_setup_qp_mw(nt, i);
		ntb_transport_setup_qp_peer_msi(nt, i);

		if (qp->client_ready)
			schedule_delayed_work(&qp->link_work, 0);
	}

	return;

out1:
	for (i = 0; i < nt->mw_count; i++)
		ntb_free_mw(nt, i);

	/* if there's an actual failure, we should just bail */
	if (rc < 0)
		return;

out:
	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_work.work);
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_transport_ctx *nt = qp->transport;
	int val;

	WARN_ON(!nt->link_is_up);

	val = ntb_spad_read(nt->ndev, QP_LINKS);

	ntb_peer_spad_write(nt->ndev, PIDX, QP_LINKS, val | BIT(qp->qp_num));

	/* query remote spad for qp ready bits */
	dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (val & BIT(qp->qp_num)) {
		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		qp->link_is_up = true;
		qp->active = true;

		if (qp->event_handler)
			qp->event_handler(qp->cb_data, qp->link_is_up);

		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);
	} else if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
				    unsigned int qp_num)
{
	struct ntb_transport_qp *qp;
	phys_addr_t mw_base;
	resource_size_t mw_size;
	unsigned int num_qps_mw, tx_size;
	unsigned int mw_num, mw_count, qp_count;
	u64 qp_offset;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);

	qp = &nt->qp_vec[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ndev = nt->ndev;
	qp->client_ready = false;
	qp->event_handler = NULL;
	ntb_qp_link_context_reset(qp);

	if (mw_num < qp_count % mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	mw_base = nt->mw_vec[mw_num].phys_addr;
	mw_size = nt->mw_vec[mw_num].phys_size;

	if (max_mw_size && mw_size > max_mw_size)
		mw_size = max_mw_size;

	tx_size = (unsigned int)mw_size / num_qps_mw;
	qp_offset = tx_size * (qp_num / mw_count);

	qp->tx_mw_size = tx_size;
	qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
	if (!qp->tx_mw)
		return -EINVAL;

	qp->tx_mw_phys = mw_base + qp_offset;
	if (!qp->tx_mw_phys)
		return -EINVAL;

	tx_size -= sizeof(struct ntb_rx_info);
	qp->rx_info = qp->tx_mw + tx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;

	if (nt->debugfs_node_dir) {
		char debugfs_name[4];

		snprintf(debugfs_name, 4, "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
						     nt->debugfs_node_dir);

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
							qp->debugfs_dir, qp,
							&ntb_qp_debugfs_stats);
	} else {
		qp->debugfs_dir = NULL;
		qp->debugfs_stats = NULL;
	}

	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);

	spin_lock_init(&qp->ntb_rx_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_post_q);
	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);

	tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
		     (unsigned long)qp);

	return 0;
}

static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
{
	struct ntb_transport_ctx *nt;
	struct ntb_transport_mw *mw;
	unsigned int mw_count, qp_count, spad_count, max_mw_count_for_spads;
	u64 qp_bitmap;
	int node;
	int rc, i;

	mw_count = ntb_peer_mw_count(ndev);

	if (!ndev->ops->mw_set_trans) {
		dev_err(&ndev->dev, "Inbound MW based NTB API is required\n");
		return -EINVAL;
	}

	if (ntb_db_is_unsafe(ndev))
		dev_dbg(&ndev->dev,
			"doorbell is unsafe, proceed anyway...\n");
	if (ntb_spad_is_unsafe(ndev))
		dev_dbg(&ndev->dev,
			"scratchpad is unsafe, proceed anyway...\n");

	if (ntb_peer_port_count(ndev) != NTB_DEF_PEER_CNT)
		dev_warn(&ndev->dev, "Multi-port NTB devices unsupported\n");

	node = dev_to_node(&ndev->dev);

	nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
	if (!nt)
		return -ENOMEM;

	nt->ndev = ndev;

	/*
	 * If we are using MSI, and have at least one extra memory window,
	 * we will reserve the last MW for the MSI window.
	 */
	if (use_msi && mw_count > 1) {
		rc = ntb_msi_init(ndev, ntb_transport_msi_desc_changed);
		if (!rc) {
			mw_count -= 1;
			nt->use_msi = true;
		}
	}

	spad_count = ntb_spad_count(ndev);

	/* Limit the MW's based on the availability of scratchpads */

	if (spad_count < NTB_TRANSPORT_MIN_SPADS) {
		nt->mw_count = 0;
		rc = -EINVAL;
		goto err;
	}

	max_mw_count_for_spads = (spad_count - MW0_SZ_HIGH) / 2;
	nt->mw_count = min(mw_count, max_mw_count_for_spads);

	nt->msi_spad_offset = nt->mw_count * 2 + MW0_SZ_HIGH;

	nt->mw_vec = kcalloc_node(mw_count, sizeof(*nt->mw_vec),
				  GFP_KERNEL, node);
	if (!nt->mw_vec) {
		rc = -ENOMEM;
		goto err;
	}

	for (i = 0; i < mw_count; i++) {
		mw = &nt->mw_vec[i];

		rc = ntb_peer_mw_get_addr(ndev, i, &mw->phys_addr,
					  &mw->phys_size);
		if (rc)
			goto err1;

		mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
		if (!mw->vbase) {
			rc = -ENOMEM;
			goto err1;
		}

		mw->buff_size = 0;
		mw->xlat_size = 0;
		mw->virt_addr = NULL;
		mw->dma_addr = 0;
	}

	qp_bitmap = ntb_db_valid_mask(ndev);

	qp_count = ilog2(qp_bitmap);
	if (nt->use_msi) {
		qp_count -= 1;
		nt->msi_db_mask = 1 << qp_count;
		ntb_db_clear_mask(ndev, nt->msi_db_mask);
	}

	if (max_num_clients && max_num_clients < qp_count)
		qp_count = max_num_clients;
	else if (nt->mw_count < qp_count)
		qp_count = nt->mw_count;

	qp_bitmap &= BIT_ULL(qp_count) - 1;

	nt->qp_count = qp_count;
	nt->qp_bitmap = qp_bitmap;
	nt->qp_bitmap_free = qp_bitmap;

	nt->qp_vec = kcalloc_node(qp_count, sizeof(*nt->qp_vec),
				  GFP_KERNEL, node);
	if (!nt->qp_vec) {
		rc = -ENOMEM;
		goto err1;
	}

	if (nt_debugfs_dir) {
		nt->debugfs_node_dir =
			debugfs_create_dir(pci_name(ndev->pdev),
					   nt_debugfs_dir);
	}

	for (i = 0; i < qp_count; i++) {
		rc = ntb_transport_init_queue(nt, i);
		if (rc)
			goto err2;
	}

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);

	rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
	if (rc)
		goto err2;

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);
	if (rc)
		goto err3;

	nt->link_is_up = false;
	ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ndev);

	return 0;

err3:
	ntb_clear_ctx(ndev);
err2:
	kfree(nt->qp_vec);
err1:
	while (i--) {
		mw = &nt->mw_vec[i];
		iounmap(mw->vbase);
	}
	kfree(nt->mw_vec);
err:
	kfree(nt);
	return rc;
}

static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
{
	struct ntb_transport_ctx *nt = ndev->ctx;
	struct ntb_transport_qp *qp;
	u64 qp_bitmap_alloc;
	int i;

	ntb_transport_link_cleanup(nt);
	cancel_work_sync(&nt->link_cleanup);
	cancel_delayed_work_sync(&nt->link_work);

	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

	/* verify that all the qp's are freed */
	for (i = 0; i < nt->qp_count; i++) {
		qp = &nt->qp_vec[i];
		if (qp_bitmap_alloc & BIT_ULL(i))
			ntb_transport_free_queue(qp);
		debugfs_remove_recursive(qp->debugfs_dir);
	}

	ntb_link_disable(ndev);
	ntb_clear_ctx(ndev);

	ntb_bus_remove(nt);

	for (i = nt->mw_count; i--; ) {
		ntb_free_mw(nt, i);
		iounmap(nt->mw_vec[i].vbase);
	}

	kfree(nt->qp_vec);
	kfree(nt->mw_vec);
	kfree(nt);
}

static void ntb_complete_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_queue_entry *entry;
	void *cb_data;
	unsigned int len;
	unsigned long irqflags;

	spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);

	while (!list_empty(&qp->rx_post_q)) {
		entry = list_first_entry(&qp->rx_post_q,
					 struct ntb_queue_entry, entry);
		if (!(entry->flags & DESC_DONE_FLAG))
			break;

		entry->rx_hdr->flags = 0;
		iowrite32(entry->rx_index, &qp->rx_info->entry);

		cb_data = entry->cb_data;
		len = entry->len;

		list_move_tail(&entry->entry, &qp->rx_free_q);

		spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);

		if (qp->rx_handler && qp->client_ready)
			qp->rx_handler(qp, qp->cb_data, cb_data, len);

		spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
	}

	spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
}

static void ntb_rx_copy_callback(void *data,
				 const struct dmaengine_result *res)
{
	struct ntb_queue_entry *entry = data;

	/* we need to check DMA results if we are using DMA */
	if (res) {
		enum dmaengine_tx_result dma_err = res->result;

		switch (dma_err) {
		case DMA_TRANS_READ_FAILED:
		case DMA_TRANS_WRITE_FAILED:
			entry->errors++;
			fallthrough;
		case DMA_TRANS_ABORTED:
		{
			struct ntb_transport_qp *qp = entry->qp;
			void *offset = qp->rx_buff + qp->rx_max_frame *
					qp->rx_index;

			ntb_memcpy_rx(entry, offset);
			qp->rx_memcpy++;
			return;
		}

		case DMA_TRANS_NOERROR:
		default:
			break;
		}
	}

	entry->flags |= DESC_DONE_FLAG;

	ntb_complete_rxc(entry->qp);
}

static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
{
	void *buf = entry->buf;
	size_t len = entry->len;

	memcpy(buf, offset, len);

	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();

	ntb_rx_copy_callback(entry, NULL);
}
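
/*
 * RX copy path note: frames of at least copy_bytes are handed to the
 * queue's rx_dma_chan when one was allocated at queue creation; shorter
 * frames, alignment the DMA engine cannot handle, or a failed submission
 * all fall back to a CPU memcpy via ntb_memcpy_rx(), see ntb_async_rx().
 */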

static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
{
	struct dma_async_tx_descriptor *txd;
	struct ntb_transport_qp *qp = entry->qp;
	struct dma_chan *chan = qp->rx_dma_chan;
	struct dma_device *device;
	size_t pay_off, buff_off, len;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	void *buf = entry->buf;

	len = entry->len;
	device = chan->device;
	pay_off = (size_t)offset & ~PAGE_MASK;
	buff_off = (size_t)buf & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
		goto err;

	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
	if (!unmap)
		goto err;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
				      pay_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[1]))
		goto err_get_unmap;

	unmap->from_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
					     unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_get_unmap;

	txd->callback_result = ntb_rx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	qp->last_cookie = cookie;

	qp->rx_async++;

	return 0;

err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err:
	return -ENXIO;
}

static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
{
	struct ntb_transport_qp *qp = entry->qp;
	struct dma_chan *chan = qp->rx_dma_chan;
	int res;

	if (!chan)
		goto err;

	if (entry->len < copy_bytes)
		goto err;

	res = ntb_async_rx_submit(entry, offset);
	if (res < 0)
		goto err;

	if (!entry->retries)
		qp->rx_async++;

	return;

err:
	ntb_memcpy_rx(entry, offset);
	qp->rx_memcpy++;
}

static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

	dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
		qp->qp_num, hdr->ver, hdr->len, hdr->flags);

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
		qp->rx_ring_empty++;
		return -EAGAIN;
	}

	if (hdr->flags & LINK_DOWN_FLAG) {
		dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
		ntb_qp_link_down(qp);
		hdr->flags = 0;
		return -EAGAIN;
	}

	if (hdr->ver != (u32)qp->rx_pkts) {
		dev_dbg(&qp->ndev->pdev->dev,
			"version mismatch, expected %llu - got %u\n",
			qp->rx_pkts, hdr->ver);
		qp->rx_err_ver++;
		return -EIO;
	}

	entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
	if (!entry) {
		dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
		qp->rx_err_no_buf++;
		return -EAGAIN;
	}

	entry->rx_hdr = hdr;
	entry->rx_index = qp->rx_index;

	if (hdr->len > entry->len) {
		dev_dbg(&qp->ndev->pdev->dev,
			"receive buffer overflow! Wanted %d got %d\n",
			hdr->len, entry->len);
		qp->rx_err_oflow++;

		entry->len = -EIO;
		entry->flags |= DESC_DONE_FLAG;

		ntb_complete_rxc(qp);
	} else {
		dev_dbg(&qp->ndev->pdev->dev,
			"RX OK index %u ver %u size %d into buf size %d\n",
			qp->rx_index, hdr->ver, hdr->len, entry->len);

		qp->rx_bytes += hdr->len;
		qp->rx_pkts++;

		entry->len = hdr->len;

		ntb_async_rx(entry, offset);
	}

	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;

	return 0;
}

static void ntb_transport_rxc_db(unsigned long data)
{
	struct ntb_transport_qp *qp = (void *)data;
	int rc, i;

	dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
		__func__, qp->qp_num);

	/* Limit the number of packets processed in a single interrupt to
	 * provide fairness to others
	 */
	for (i = 0; i < qp->rx_max_entry; i++) {
		rc = ntb_process_rxc(qp);
		if (rc)
			break;
	}

	if (i && qp->rx_dma_chan)
		dma_async_issue_pending(qp->rx_dma_chan);

	if (i == qp->rx_max_entry) {
		/* there is more work to do */
		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);
	} else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
		/* the doorbell bit is set: clear it */
		ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
		/* ntb_db_read ensures ntb_db_clear write is committed */
		ntb_db_read(qp->ndev);

		/* an interrupt may have arrived between finishing
		 * ntb_process_rxc and clearing the doorbell bit:
		 * there might be some more work to do.
		 */
		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);
	}
}

static void ntb_tx_copy_callback(void *data,
				 const struct dmaengine_result *res)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;

	/* we need to check DMA results if we are using DMA */
	if (res) {
		enum dmaengine_tx_result dma_err = res->result;

		switch (dma_err) {
		case DMA_TRANS_READ_FAILED:
		case DMA_TRANS_WRITE_FAILED:
			entry->errors++;
			fallthrough;
		case DMA_TRANS_ABORTED:
		{
			void __iomem *offset =
				qp->tx_mw + qp->tx_max_frame *
				entry->tx_index;

			/* resubmit via CPU */
			ntb_memcpy_tx(entry, offset);
			qp->tx_memcpy++;
			return;
		}

		case DMA_TRANS_NOERROR:
		default:
			break;
		}
	}

	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

	if (qp->use_msi)
		ntb_msi_peer_trigger(qp->ndev, PIDX, &qp->peer_msi_desc);
	else
		ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
				       entry->len);
	}

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}

static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
#ifdef ARCH_HAS_NOCACHE_UACCESS
	/*
	 * Using non-temporal mov to improve performance on non-cached
	 * writes, even though we aren't actually copying from user space.
	 */
	__copy_from_user_inatomic_nocache(offset, entry->buf, entry->len);
#else
	memcpy_toio(offset, entry->buf, entry->len);
#endif

	/* Ensure that the data is fully copied out before setting the flags */
	wmb();

	ntb_tx_copy_callback(entry, NULL);
}

static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
			       struct ntb_queue_entry *entry)
{
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = qp->tx_dma_chan;
	struct dma_device *device;
	size_t len = entry->len;
	void *buf = entry->buf;
	size_t dest_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_addr_t dest;
	dma_cookie_t cookie;

	device = chan->device;
	dest = qp->tx_mw_dma_addr + qp->tx_max_frame * entry->tx_index;
	buff_off = (size_t)buf & ~PAGE_MASK;
	dest_off = (size_t)dest & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
		goto err;

	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
	if (!unmap)
		goto err;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_get_unmap;

	txd->callback_result = ntb_tx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	dma_async_issue_pending(chan);

	return 0;
err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err:
	return -ENXIO;
}

static void ntb_async_tx(struct ntb_transport_qp *qp,
			 struct ntb_queue_entry *entry)
{
	struct ntb_payload_header __iomem *hdr;
	struct dma_chan *chan = qp->tx_dma_chan;
	void __iomem *offset;
	int res;

	entry->tx_index = qp->tx_index;
	offset = qp->tx_mw + qp->tx_max_frame * entry->tx_index;
	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
	entry->tx_hdr = hdr;

	iowrite32(entry->len, &hdr->len);
	iowrite32((u32)qp->tx_pkts, &hdr->ver);

	if (!chan)
		goto err;

	if (entry->len < copy_bytes)
		goto err;

	res = ntb_async_tx_submit(qp, entry);
	if (res < 0)
		goto err;

	if (!entry->retries)
		qp->tx_async++;

	return;

err:
	ntb_memcpy_tx(entry, offset);
	qp->tx_memcpy++;
}

static int ntb_process_tx(struct ntb_transport_qp *qp,
			  struct ntb_queue_entry *entry)
{
	if (!ntb_transport_tx_free_entry(qp)) {
		qp->tx_ring_full++;
		return -EAGAIN;
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
		return 0;
	}

	ntb_async_tx(qp, entry);

	qp->tx_index++;
	qp->tx_index %= qp->tx_max_entry;

	qp->tx_pkts++;

	return 0;
}
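
/*
 * Peer link-down notification: a queue going down transmits one final,
 * zero-length entry with LINK_DOWN_FLAG set (see ntb_send_link_down()
 * below); the receiver reacts in ntb_process_rxc() by scheduling its own
 * ntb_qp_link_down() cleanup for that queue.
 */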

static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_queue_entry *entry;
	int i, rc;

	if (!qp->link_is_up)
		return;

	dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num);

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry)
			break;
		msleep(100);
	}

	if (!entry)
		return;

	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = LINK_DOWN_FLAG;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
			qp->qp_num);

	ntb_qp_link_down_reset(qp);
}

static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
{
	return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
}

/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @data: pointer for callback data
 * @client_dev: &struct device pointer of the client device
 * @handlers: pointer to the queue's receive, transmit and event callbacks
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive.  The receive callback routine will be
 * used to pass up data when the transport has received it on the queue.  The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct device *client_dev,
			   const struct ntb_queue_handlers *handlers)
{
	struct ntb_dev *ndev;
	struct pci_dev *pdev;
	struct ntb_transport_ctx *nt;
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;
	u64 qp_bit;
	unsigned int free_queue;
	dma_cap_mask_t dma_mask;
	int node;
	int i;

	ndev = dev_ntb(client_dev->parent);
	pdev = ndev->pdev;
	nt = ndev->ctx;

	node = dev_to_node(&ndev->dev);

	free_queue = ffs(nt->qp_bitmap_free);
	if (!free_queue)
		goto err;

	/* decrement free_queue to make it zero based */
	free_queue--;

	qp = &nt->qp_vec[free_queue];
	qp_bit = BIT_ULL(qp->qp_num);

	nt->qp_bitmap_free &= ~qp_bit;

	qp->cb_data = data;
	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	dma_cap_zero(dma_mask);
	dma_cap_set(DMA_MEMCPY, dma_mask);

	if (use_dma) {
		qp->tx_dma_chan =
			dma_request_channel(dma_mask, ntb_dma_filter_fn,
					    (void *)(unsigned long)node);
		if (!qp->tx_dma_chan)
			dev_info(&pdev->dev, "Unable to allocate TX DMA channel\n");

		qp->rx_dma_chan =
			dma_request_channel(dma_mask, ntb_dma_filter_fn,
					    (void *)(unsigned long)node);
		if (!qp->rx_dma_chan)
			dev_info(&pdev->dev, "Unable to allocate RX DMA channel\n");
	} else {
		qp->tx_dma_chan = NULL;
		qp->rx_dma_chan = NULL;
	}

	qp->tx_mw_dma_addr = 0;
	if (qp->tx_dma_chan) {
		qp->tx_mw_dma_addr =
			dma_map_resource(qp->tx_dma_chan->device->dev,
					 qp->tx_mw_phys, qp->tx_mw_size,
					 DMA_FROM_DEVICE, 0);
		if (dma_mapping_error(qp->tx_dma_chan->device->dev,
				      qp->tx_mw_dma_addr)) {
			qp->tx_mw_dma_addr = 0;
			goto err1;
		}
	}

	dev_dbg(&pdev->dev, "Using %s memcpy for TX\n",
		qp->tx_dma_chan ? "DMA" : "CPU");
TX\n", 2053 qp->tx_dma_chan ? "DMA" : "CPU"); 2054 2055 dev_dbg(&pdev->dev, "Using %s memcpy for RX\n", 2056 qp->rx_dma_chan ? "DMA" : "CPU"); 2057 2058 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { 2059 entry = kzalloc_node(sizeof(*entry), GFP_KERNEL, node); 2060 if (!entry) 2061 goto err1; 2062 2063 entry->qp = qp; 2064 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, 2065 &qp->rx_free_q); 2066 } 2067 qp->rx_alloc_entry = NTB_QP_DEF_NUM_ENTRIES; 2068 2069 for (i = 0; i < qp->tx_max_entry; i++) { 2070 entry = kzalloc_node(sizeof(*entry), GFP_KERNEL, node); 2071 if (!entry) 2072 goto err2; 2073 2074 entry->qp = qp; 2075 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, 2076 &qp->tx_free_q); 2077 } 2078 2079 ntb_db_clear(qp->ndev, qp_bit); 2080 ntb_db_clear_mask(qp->ndev, qp_bit); 2081 2082 dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num); 2083 2084 return qp; 2085 2086 err2: 2087 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) 2088 kfree(entry); 2089 err1: 2090 qp->rx_alloc_entry = 0; 2091 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) 2092 kfree(entry); 2093 if (qp->tx_mw_dma_addr) 2094 dma_unmap_resource(qp->tx_dma_chan->device->dev, 2095 qp->tx_mw_dma_addr, qp->tx_mw_size, 2096 DMA_FROM_DEVICE, 0); 2097 if (qp->tx_dma_chan) 2098 dma_release_channel(qp->tx_dma_chan); 2099 if (qp->rx_dma_chan) 2100 dma_release_channel(qp->rx_dma_chan); 2101 nt->qp_bitmap_free |= qp_bit; 2102 err: 2103 return NULL; 2104 } 2105 EXPORT_SYMBOL_GPL(ntb_transport_create_queue); 2106 2107 /** 2108 * ntb_transport_free_queue - Frees NTB transport queue 2109 * @qp: NTB queue to be freed 2110 * 2111 * Frees NTB transport queue 2112 */ 2113 void ntb_transport_free_queue(struct ntb_transport_qp *qp) 2114 { 2115 struct pci_dev *pdev; 2116 struct ntb_queue_entry *entry; 2117 u64 qp_bit; 2118 2119 if (!qp) 2120 return; 2121 2122 pdev = qp->ndev->pdev; 2123 2124 qp->active = false; 2125 2126 if (qp->tx_dma_chan) { 2127 struct dma_chan *chan = qp->tx_dma_chan; 2128 /* Putting the dma_chan to NULL will force any new traffic to be 2129 * processed by the CPU instead of the DAM engine 2130 */ 2131 qp->tx_dma_chan = NULL; 2132 2133 /* Try to be nice and wait for any queued DMA engine 2134 * transactions to process before smashing it with a rock 2135 */ 2136 dma_sync_wait(chan, qp->last_cookie); 2137 dmaengine_terminate_all(chan); 2138 2139 dma_unmap_resource(chan->device->dev, 2140 qp->tx_mw_dma_addr, qp->tx_mw_size, 2141 DMA_FROM_DEVICE, 0); 2142 2143 dma_release_channel(chan); 2144 } 2145 2146 if (qp->rx_dma_chan) { 2147 struct dma_chan *chan = qp->rx_dma_chan; 2148 /* Putting the dma_chan to NULL will force any new traffic to be 2149 * processed by the CPU instead of the DAM engine 2150 */ 2151 qp->rx_dma_chan = NULL; 2152 2153 /* Try to be nice and wait for any queued DMA engine 2154 * transactions to process before smashing it with a rock 2155 */ 2156 dma_sync_wait(chan, qp->last_cookie); 2157 dmaengine_terminate_all(chan); 2158 dma_release_channel(chan); 2159 } 2160 2161 qp_bit = BIT_ULL(qp->qp_num); 2162 2163 ntb_db_set_mask(qp->ndev, qp_bit); 2164 tasklet_kill(&qp->rxc_db_work); 2165 2166 cancel_delayed_work_sync(&qp->link_work); 2167 2168 qp->cb_data = NULL; 2169 qp->rx_handler = NULL; 2170 qp->tx_handler = NULL; 2171 qp->event_handler = NULL; 2172 2173 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) 2174 kfree(entry); 2175 2176 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) { 2177 dev_warn(&pdev->dev, "Freeing item 

/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees NTB transport queue
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	struct ntb_queue_entry *entry;
	u64 qp_bit;

	if (!qp)
		return;

	pdev = qp->ndev->pdev;

	qp->active = false;

	if (qp->tx_dma_chan) {
		struct dma_chan *chan = qp->tx_dma_chan;
		/* Setting the dma_chan to NULL will force any new traffic to be
		 * processed by the CPU instead of the DMA engine
		 */
		qp->tx_dma_chan = NULL;

		/* Try to be nice and wait for any queued DMA engine
		 * transactions to process before smashing it with a rock
		 */
		dma_sync_wait(chan, qp->last_cookie);
		dmaengine_terminate_all(chan);

		dma_unmap_resource(chan->device->dev,
				   qp->tx_mw_dma_addr, qp->tx_mw_size,
				   DMA_FROM_DEVICE, 0);

		dma_release_channel(chan);
	}

	if (qp->rx_dma_chan) {
		struct dma_chan *chan = qp->rx_dma_chan;
		/* Setting the dma_chan to NULL will force any new traffic to be
		 * processed by the CPU instead of the DMA engine
		 */
		qp->rx_dma_chan = NULL;

		/* Try to be nice and wait for any queued DMA engine
		 * transactions to process before smashing it with a rock
		 */
		dma_sync_wait(chan, qp->last_cookie);
		dmaengine_terminate_all(chan);
		dma_release_channel(chan);
	}

	qp_bit = BIT_ULL(qp->qp_num);

	ntb_db_set_mask(qp->ndev, qp_bit);
	tasklet_kill(&qp->rxc_db_work);

	cancel_delayed_work_sync(&qp->link_work);

	qp->cb_data = NULL;
	qp->rx_handler = NULL;
	qp->tx_handler = NULL;
	qp->event_handler = NULL;

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
		kfree(entry);

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
		dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
		dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);

	qp->transport->qp_bitmap_free |= qp_bit;

	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);

/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB transport layer queue the buffer was enqueued on
 * @len: pointer to variable to write the enqueued buffer's length into
 *
 * Dequeues unused buffers from receive queue. Should only be used during
 * shutdown of qp.
 *
 * RETURNS: pointer to the dequeued buffer, or NULL on error.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
	struct ntb_queue_entry *entry;
	void *buf;

	if (!qp || qp->client_ready)
		return NULL;

	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
	if (!entry)
		return NULL;

	buf = entry->cb_data;
	*len = entry->len;

	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);

	return buf;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);

/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue, into which an NTB
 * payload can be received.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;

	if (!qp)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
	if (!entry)
		return -ENOMEM;

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;
	entry->retries = 0;
	entry->errors = 0;
	entry->rx_index = 0;

	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);

	if (qp->active)
		tasklet_schedule(&qp->rxc_db_work);

	return 0;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
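
/*
 * Illustrative sketch (not part of the driver): a hypothetical client
 * rx_handler, following the rx_handler prototype in struct
 * ntb_queue_handlers.  The transport invokes it once a received payload has
 * been copied into a buffer previously posted with
 * ntb_transport_rx_enqueue(); a typical handler consumes the data and
 * immediately re-posts the buffer.  my_consume() and RX_BUF_SIZE are
 * made-up client-side names.
 *
 *	static void my_rx(struct ntb_transport_qp *qp, void *qp_data,
 *			  void *data, int len)
 *	{
 *		if (len < 0)
 *			return;
 *
 *		my_consume(data, len);
 *
 *		ntb_transport_rx_enqueue(qp, data, data, RX_BUF_SIZE);
 *	}
 */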

/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted. This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	if (!qp || !len)
		return -EINVAL;

	/* If the qp link is down already, just ignore. */
	if (!qp->link_is_up)
		return 0;

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (!entry) {
		qp->tx_err_no_buf++;
		return -EBUSY;
	}

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;
	entry->errors = 0;
	entry->retries = 0;
	entry->tx_index = 0;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);

/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
	if (!qp)
		return;

	qp->client_ready = true;

	if (qp->transport->link_is_up)
		schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);

/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified. It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	int val;

	if (!qp)
		return;

	qp->client_ready = false;

	val = ntb_spad_read(qp->ndev, QP_LINKS);

	ntb_peer_spad_write(qp->ndev, PIDX, QP_LINKS, val & ~BIT(qp->qp_num));

	if (qp->link_is_up)
		ntb_send_link_down(qp);
	else
		cancel_delayed_work_sync(&qp->link_work);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_down);

/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
	if (!qp)
		return false;

	return qp->link_is_up;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);

/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
	if (!qp)
		return 0;

	return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
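
/*
 * Illustrative sketch (not part of the driver): a hypothetical client
 * transmit path.  ntb_transport_tx_enqueue() returns -EBUSY when no free
 * queue entries are left and propagates -EAGAIN from ntb_process_tx() when
 * the TX ring itself is full, so a client normally throttles itself until
 * its tx_handler has been called for outstanding entries.  my_cookie and
 * my_stop_tx() are made-up client-side names.
 *
 *	if (!ntb_transport_link_query(qp))
 *		return -ENODEV;
 *
 *	rc = ntb_transport_tx_enqueue(qp, my_cookie, data, len);
 *	if (rc) {
 *		my_stop_tx();
 *		return rc;
 *	}
 */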

/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
	unsigned int max_size;
	unsigned int copy_align;
	struct dma_chan *rx_chan, *tx_chan;

	if (!qp)
		return 0;

	rx_chan = qp->rx_dma_chan;
	tx_chan = qp->tx_dma_chan;

	copy_align = max(rx_chan ? rx_chan->device->copy_align : 0,
			 tx_chan ? tx_chan->device->copy_align : 0);

	/* If DMA engine usage is possible, try to find the max size for that */
	max_size = qp->tx_max_frame - sizeof(struct ntb_payload_header);
	max_size = round_down(max_size, 1 << copy_align);

	return max_size;
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);

unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
{
	unsigned int head = qp->tx_index;
	unsigned int tail = qp->remote_rx_info->entry;

	return tail >= head ? tail - head : qp->tx_max_entry + tail - head;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);

static void ntb_transport_doorbell_callback(void *data, int vector)
{
	struct ntb_transport_ctx *nt = data;
	struct ntb_transport_qp *qp;
	u64 db_bits;
	unsigned int qp_num;

	if (ntb_db_read(nt->ndev) & nt->msi_db_mask) {
		ntb_transport_msi_peer_desc_changed(nt);
		ntb_db_clear(nt->ndev, nt->msi_db_mask);
	}

	db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
		   ntb_db_vector_mask(nt->ndev, vector));

	while (db_bits) {
		qp_num = __ffs(db_bits);
		qp = &nt->qp_vec[qp_num];

		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);

		db_bits &= ~BIT_ULL(qp_num);
	}
}

static const struct ntb_ctx_ops ntb_transport_ops = {
	.link_event = ntb_transport_event_callback,
	.db_event = ntb_transport_doorbell_callback,
};

static struct ntb_client ntb_transport_client = {
	.ops = {
		.probe = ntb_transport_probe,
		.remove = ntb_transport_free,
	},
};

static int __init ntb_transport_init(void)
{
	int rc;

	pr_info("%s, version %s\n", NTB_TRANSPORT_DESC, NTB_TRANSPORT_VER);

	if (debugfs_initialized())
		nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	rc = bus_register(&ntb_transport_bus);
	if (rc)
		goto err_bus;

	rc = ntb_register_client(&ntb_transport_client);
	if (rc)
		goto err_client;

	return 0;

err_client:
	bus_unregister(&ntb_transport_bus);
err_bus:
	debugfs_remove_recursive(nt_debugfs_dir);
	return rc;
}
module_init(ntb_transport_init);

static void __exit ntb_transport_exit(void)
{
	ntb_unregister_client(&ntb_transport_client);
	bus_unregister(&ntb_transport_bus);
	debugfs_remove_recursive(nt_debugfs_dir);
}
module_exit(ntb_transport_exit);
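
/*
 * Worked example for the ring accounting in ntb_transport_tx_free_entry()
 * above: the TX ring is circular, indexed by the local head (tx_index) and
 * the remote tail (remote_rx_info->entry).  With tx_max_entry = 8,
 * head = 6 and tail = 2, tail < head, so the free count is
 * tx_max_entry + tail - head = 8 + 2 - 6 = 4; with tail = 7 it is simply
 * tail - head = 7 - 6 = 1.
 */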