1 /* 2 * This file is provided under a dual BSD/GPLv2 license. When using or 3 * redistributing this file, you may do so under either license. 4 * 5 * GPL LICENSE SUMMARY 6 * 7 * Copyright(c) 2012 Intel Corporation. All rights reserved. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of version 2 of the GNU General Public License as 11 * published by the Free Software Foundation. 12 * 13 * BSD LICENSE 14 * 15 * Copyright(c) 2012 Intel Corporation. All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions 19 * are met: 20 * 21 * * Redistributions of source code must retain the above copyright 22 * notice, this list of conditions and the following disclaimer. 23 * * Redistributions in binary form must reproduce the above copy 24 * notice, this list of conditions and the following disclaimer in 25 * the documentation and/or other materials provided with the 26 * distribution. 27 * * Neither the name of Intel Corporation nor the names of its 28 * contributors may be used to endorse or promote products derived 29 * from this software without specific prior written permission. 30 * 31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 35 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 36 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 37 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 38 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 39 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 40 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 41 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 42 * 43 * Intel PCIe NTB Linux driver 44 * 45 * Contact Information: 46 * Jon Mason <jon.mason@intel.com> 47 */ 48 #include <linux/debugfs.h> 49 #include <linux/delay.h> 50 #include <linux/dma-mapping.h> 51 #include <linux/errno.h> 52 #include <linux/export.h> 53 #include <linux/interrupt.h> 54 #include <linux/module.h> 55 #include <linux/pci.h> 56 #include <linux/slab.h> 57 #include <linux/types.h> 58 #include <linux/ntb.h> 59 #include "ntb_hw.h" 60 61 #define NTB_TRANSPORT_VERSION 3 62 63 static unsigned int transport_mtu = 0x401E; 64 module_param(transport_mtu, uint, 0644); 65 MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets"); 66 67 static unsigned char max_num_clients = 2; 68 module_param(max_num_clients, byte, 0644); 69 MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients"); 70 71 struct ntb_queue_entry { 72 /* ntb_queue list reference */ 73 struct list_head entry; 74 /* pointers to data to be transfered */ 75 void *cb_data; 76 void *buf; 77 unsigned int len; 78 unsigned int flags; 79 }; 80 81 struct ntb_rx_info { 82 unsigned int entry; 83 }; 84 85 struct ntb_transport_qp { 86 struct ntb_transport *transport; 87 struct ntb_device *ndev; 88 void *cb_data; 89 90 bool client_ready; 91 bool qp_link; 92 u8 qp_num; /* Only 64 QP's are allowed. 0-63 */ 93 94 struct ntb_rx_info __iomem *rx_info; 95 struct ntb_rx_info *remote_rx_info; 96 97 void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data, 98 void *data, int len); 99 struct list_head tx_free_q; 100 spinlock_t ntb_tx_free_q_lock; 101 void __iomem *tx_mw; 102 unsigned int tx_index; 103 unsigned int tx_max_entry; 104 unsigned int tx_max_frame; 105 106 void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data, 107 void *data, int len); 108 struct tasklet_struct rx_work; 109 struct list_head rx_pend_q; 110 struct list_head rx_free_q; 111 spinlock_t ntb_rx_pend_q_lock; 112 spinlock_t ntb_rx_free_q_lock; 113 void *rx_buff; 114 unsigned int rx_index; 115 unsigned int rx_max_entry; 116 unsigned int rx_max_frame; 117 118 void (*event_handler) (void *data, int status); 119 struct delayed_work link_work; 120 struct work_struct link_cleanup; 121 122 struct dentry *debugfs_dir; 123 struct dentry *debugfs_stats; 124 125 /* Stats */ 126 u64 rx_bytes; 127 u64 rx_pkts; 128 u64 rx_ring_empty; 129 u64 rx_err_no_buf; 130 u64 rx_err_oflow; 131 u64 rx_err_ver; 132 u64 tx_bytes; 133 u64 tx_pkts; 134 u64 tx_ring_full; 135 }; 136 137 struct ntb_transport_mw { 138 size_t size; 139 void *virt_addr; 140 dma_addr_t dma_addr; 141 }; 142 143 struct ntb_transport_client_dev { 144 struct list_head entry; 145 struct device dev; 146 }; 147 148 struct ntb_transport { 149 struct list_head entry; 150 struct list_head client_devs; 151 152 struct ntb_device *ndev; 153 struct ntb_transport_mw mw[NTB_NUM_MW]; 154 struct ntb_transport_qp *qps; 155 unsigned int max_qps; 156 unsigned long qp_bitmap; 157 bool transport_link; 158 struct delayed_work link_work; 159 struct work_struct link_cleanup; 160 struct dentry *debugfs_dir; 161 }; 162 163 enum { 164 DESC_DONE_FLAG = 1 << 0, 165 LINK_DOWN_FLAG = 1 << 1, 166 }; 167 168 struct ntb_payload_header { 169 unsigned int ver; 170 unsigned int len; 171 unsigned int flags; 172 }; 173 174 enum { 175 VERSION = 0, 176 QP_LINKS, 177 NUM_QPS, 178 NUM_MWS, 179 MW0_SZ_HIGH, 180 MW0_SZ_LOW, 181 MW1_SZ_HIGH, 182 MW1_SZ_LOW, 183 MAX_SPAD, 184 }; 185 186 #define QP_TO_MW(qp) ((qp) % NTB_NUM_MW) 187 #define NTB_QP_DEF_NUM_ENTRIES 100 188 #define NTB_LINK_DOWN_TIMEOUT 10 189 190 static int ntb_match_bus(struct device *dev, struct device_driver *drv) 191 { 192 return !strncmp(dev_name(dev), drv->name, strlen(drv->name)); 193 } 194 195 static int ntb_client_probe(struct device *dev) 196 { 197 const struct ntb_client *drv = container_of(dev->driver, 198 struct ntb_client, driver); 199 struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev); 200 int rc = -EINVAL; 201 202 get_device(dev); 203 if (drv && drv->probe) 204 rc = drv->probe(pdev); 205 if (rc) 206 put_device(dev); 207 208 return rc; 209 } 210 211 static int ntb_client_remove(struct device *dev) 212 { 213 const struct ntb_client *drv = container_of(dev->driver, 214 struct ntb_client, driver); 215 struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev); 216 217 if (drv && drv->remove) 218 drv->remove(pdev); 219 220 put_device(dev); 221 222 return 0; 223 } 224 225 static struct bus_type ntb_bus_type = { 226 .name = "ntb_bus", 227 .match = ntb_match_bus, 228 .probe = ntb_client_probe, 229 .remove = ntb_client_remove, 230 }; 231 232 static LIST_HEAD(ntb_transport_list); 233 234 static int ntb_bus_init(struct ntb_transport *nt) 235 { 236 if (list_empty(&ntb_transport_list)) { 237 int rc = bus_register(&ntb_bus_type); 238 if (rc) 239 return rc; 240 } 241 242 list_add(&nt->entry, &ntb_transport_list); 243 244 return 0; 245 } 246 247 static void ntb_bus_remove(struct ntb_transport *nt) 248 { 249 struct ntb_transport_client_dev *client_dev, *cd; 250 251 list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) { 252 dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n", 253 dev_name(&client_dev->dev)); 254 list_del(&client_dev->entry); 255 device_unregister(&client_dev->dev); 256 } 257 258 list_del(&nt->entry); 259 260 if (list_empty(&ntb_transport_list)) 261 bus_unregister(&ntb_bus_type); 262 } 263 264 static void ntb_client_release(struct device *dev) 265 { 266 struct ntb_transport_client_dev *client_dev; 267 client_dev = container_of(dev, struct ntb_transport_client_dev, dev); 268 269 kfree(client_dev); 270 } 271 272 /** 273 * ntb_unregister_client_dev - Unregister NTB client device 274 * @device_name: Name of NTB client device 275 * 276 * Unregister an NTB client device with the NTB transport layer 277 */ 278 void ntb_unregister_client_dev(char *device_name) 279 { 280 struct ntb_transport_client_dev *client, *cd; 281 struct ntb_transport *nt; 282 283 list_for_each_entry(nt, &ntb_transport_list, entry) 284 list_for_each_entry_safe(client, cd, &nt->client_devs, entry) 285 if (!strncmp(dev_name(&client->dev), device_name, 286 strlen(device_name))) { 287 list_del(&client->entry); 288 device_unregister(&client->dev); 289 } 290 } 291 EXPORT_SYMBOL_GPL(ntb_unregister_client_dev); 292 293 /** 294 * ntb_register_client_dev - Register NTB client device 295 * @device_name: Name of NTB client device 296 * 297 * Register an NTB client device with the NTB transport layer 298 */ 299 int ntb_register_client_dev(char *device_name) 300 { 301 struct ntb_transport_client_dev *client_dev; 302 struct ntb_transport *nt; 303 int rc, i = 0; 304 305 if (list_empty(&ntb_transport_list)) 306 return -ENODEV; 307 308 list_for_each_entry(nt, &ntb_transport_list, entry) { 309 struct device *dev; 310 311 client_dev = kzalloc(sizeof(struct ntb_transport_client_dev), 312 GFP_KERNEL); 313 if (!client_dev) { 314 rc = -ENOMEM; 315 goto err; 316 } 317 318 dev = &client_dev->dev; 319 320 /* setup and register client devices */ 321 dev_set_name(dev, "%s%d", device_name, i); 322 dev->bus = &ntb_bus_type; 323 dev->release = ntb_client_release; 324 dev->parent = &ntb_query_pdev(nt->ndev)->dev; 325 326 rc = device_register(dev); 327 if (rc) { 328 kfree(client_dev); 329 goto err; 330 } 331 332 list_add_tail(&client_dev->entry, &nt->client_devs); 333 i++; 334 } 335 336 return 0; 337 338 err: 339 ntb_unregister_client_dev(device_name); 340 341 return rc; 342 } 343 EXPORT_SYMBOL_GPL(ntb_register_client_dev); 344 345 /** 346 * ntb_register_client - Register NTB client driver 347 * @drv: NTB client driver to be registered 348 * 349 * Register an NTB client driver with the NTB transport layer 350 * 351 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 352 */ 353 int ntb_register_client(struct ntb_client *drv) 354 { 355 drv->driver.bus = &ntb_bus_type; 356 357 if (list_empty(&ntb_transport_list)) 358 return -ENODEV; 359 360 return driver_register(&drv->driver); 361 } 362 EXPORT_SYMBOL_GPL(ntb_register_client); 363 364 /** 365 * ntb_unregister_client - Unregister NTB client driver 366 * @drv: NTB client driver to be unregistered 367 * 368 * Unregister an NTB client driver with the NTB transport layer 369 * 370 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 371 */ 372 void ntb_unregister_client(struct ntb_client *drv) 373 { 374 driver_unregister(&drv->driver); 375 } 376 EXPORT_SYMBOL_GPL(ntb_unregister_client); 377 378 static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count, 379 loff_t *offp) 380 { 381 struct ntb_transport_qp *qp; 382 char *buf; 383 ssize_t ret, out_offset, out_count; 384 385 out_count = 600; 386 387 buf = kmalloc(out_count, GFP_KERNEL); 388 if (!buf) 389 return -ENOMEM; 390 391 qp = filp->private_data; 392 out_offset = 0; 393 out_offset += snprintf(buf + out_offset, out_count - out_offset, 394 "NTB QP stats\n"); 395 out_offset += snprintf(buf + out_offset, out_count - out_offset, 396 "rx_bytes - \t%llu\n", qp->rx_bytes); 397 out_offset += snprintf(buf + out_offset, out_count - out_offset, 398 "rx_pkts - \t%llu\n", qp->rx_pkts); 399 out_offset += snprintf(buf + out_offset, out_count - out_offset, 400 "rx_ring_empty - %llu\n", qp->rx_ring_empty); 401 out_offset += snprintf(buf + out_offset, out_count - out_offset, 402 "rx_err_no_buf - %llu\n", qp->rx_err_no_buf); 403 out_offset += snprintf(buf + out_offset, out_count - out_offset, 404 "rx_err_oflow - \t%llu\n", qp->rx_err_oflow); 405 out_offset += snprintf(buf + out_offset, out_count - out_offset, 406 "rx_err_ver - \t%llu\n", qp->rx_err_ver); 407 out_offset += snprintf(buf + out_offset, out_count - out_offset, 408 "rx_buff - \t%p\n", qp->rx_buff); 409 out_offset += snprintf(buf + out_offset, out_count - out_offset, 410 "rx_index - \t%u\n", qp->rx_index); 411 out_offset += snprintf(buf + out_offset, out_count - out_offset, 412 "rx_max_entry - \t%u\n", qp->rx_max_entry); 413 414 out_offset += snprintf(buf + out_offset, out_count - out_offset, 415 "tx_bytes - \t%llu\n", qp->tx_bytes); 416 out_offset += snprintf(buf + out_offset, out_count - out_offset, 417 "tx_pkts - \t%llu\n", qp->tx_pkts); 418 out_offset += snprintf(buf + out_offset, out_count - out_offset, 419 "tx_ring_full - \t%llu\n", qp->tx_ring_full); 420 out_offset += snprintf(buf + out_offset, out_count - out_offset, 421 "tx_mw - \t%p\n", qp->tx_mw); 422 out_offset += snprintf(buf + out_offset, out_count - out_offset, 423 "tx_index - \t%u\n", qp->tx_index); 424 out_offset += snprintf(buf + out_offset, out_count - out_offset, 425 "tx_max_entry - \t%u\n", qp->tx_max_entry); 426 427 out_offset += snprintf(buf + out_offset, out_count - out_offset, 428 "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ? 429 "Up" : "Down"); 430 if (out_offset > out_count) 431 out_offset = out_count; 432 433 ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset); 434 kfree(buf); 435 return ret; 436 } 437 438 static const struct file_operations ntb_qp_debugfs_stats = { 439 .owner = THIS_MODULE, 440 .open = simple_open, 441 .read = debugfs_read, 442 }; 443 444 static void ntb_list_add(spinlock_t *lock, struct list_head *entry, 445 struct list_head *list) 446 { 447 unsigned long flags; 448 449 spin_lock_irqsave(lock, flags); 450 list_add_tail(entry, list); 451 spin_unlock_irqrestore(lock, flags); 452 } 453 454 static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock, 455 struct list_head *list) 456 { 457 struct ntb_queue_entry *entry; 458 unsigned long flags; 459 460 spin_lock_irqsave(lock, flags); 461 if (list_empty(list)) { 462 entry = NULL; 463 goto out; 464 } 465 entry = list_first_entry(list, struct ntb_queue_entry, entry); 466 list_del(&entry->entry); 467 out: 468 spin_unlock_irqrestore(lock, flags); 469 470 return entry; 471 } 472 473 static void ntb_transport_setup_qp_mw(struct ntb_transport *nt, 474 unsigned int qp_num) 475 { 476 struct ntb_transport_qp *qp = &nt->qps[qp_num]; 477 unsigned int rx_size, num_qps_mw; 478 u8 mw_num = QP_TO_MW(qp_num); 479 unsigned int i; 480 481 WARN_ON(nt->mw[mw_num].virt_addr == NULL); 482 483 if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW) 484 num_qps_mw = nt->max_qps / NTB_NUM_MW + 1; 485 else 486 num_qps_mw = nt->max_qps / NTB_NUM_MW; 487 488 rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw; 489 qp->remote_rx_info = nt->mw[mw_num].virt_addr + 490 (qp_num / NTB_NUM_MW * rx_size); 491 rx_size -= sizeof(struct ntb_rx_info); 492 493 qp->rx_buff = qp->remote_rx_info + 1; 494 /* Due to housekeeping, there must be atleast 2 buffs */ 495 qp->rx_max_frame = min(transport_mtu, rx_size / 2); 496 qp->rx_max_entry = rx_size / qp->rx_max_frame; 497 qp->rx_index = 0; 498 499 qp->remote_rx_info->entry = qp->rx_max_entry - 1; 500 501 /* setup the hdr offsets with 0's */ 502 for (i = 0; i < qp->rx_max_entry; i++) { 503 void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) - 504 sizeof(struct ntb_payload_header); 505 memset(offset, 0, sizeof(struct ntb_payload_header)); 506 } 507 508 qp->rx_pkts = 0; 509 qp->tx_pkts = 0; 510 qp->tx_index = 0; 511 } 512 513 static void ntb_free_mw(struct ntb_transport *nt, int num_mw) 514 { 515 struct ntb_transport_mw *mw = &nt->mw[num_mw]; 516 struct pci_dev *pdev = ntb_query_pdev(nt->ndev); 517 518 if (!mw->virt_addr) 519 return; 520 521 dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr); 522 mw->virt_addr = NULL; 523 } 524 525 static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size) 526 { 527 struct ntb_transport_mw *mw = &nt->mw[num_mw]; 528 struct pci_dev *pdev = ntb_query_pdev(nt->ndev); 529 530 /* No need to re-setup */ 531 if (mw->size == ALIGN(size, 4096)) 532 return 0; 533 534 if (mw->size != 0) 535 ntb_free_mw(nt, num_mw); 536 537 /* Alloc memory for receiving data. Must be 4k aligned */ 538 mw->size = ALIGN(size, 4096); 539 540 mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr, 541 GFP_KERNEL); 542 if (!mw->virt_addr) { 543 mw->size = 0; 544 dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n", 545 (int) mw->size); 546 return -ENOMEM; 547 } 548 549 /* Notify HW the memory location of the receive buffer */ 550 ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr); 551 552 return 0; 553 } 554 555 static void ntb_qp_link_cleanup(struct work_struct *work) 556 { 557 struct ntb_transport_qp *qp = container_of(work, 558 struct ntb_transport_qp, 559 link_cleanup); 560 struct ntb_transport *nt = qp->transport; 561 struct pci_dev *pdev = ntb_query_pdev(nt->ndev); 562 563 if (qp->qp_link == NTB_LINK_DOWN) { 564 cancel_delayed_work_sync(&qp->link_work); 565 return; 566 } 567 568 if (qp->event_handler) 569 qp->event_handler(qp->cb_data, NTB_LINK_DOWN); 570 571 dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num); 572 qp->qp_link = NTB_LINK_DOWN; 573 574 if (nt->transport_link == NTB_LINK_UP) 575 schedule_delayed_work(&qp->link_work, 576 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT)); 577 } 578 579 static void ntb_qp_link_down(struct ntb_transport_qp *qp) 580 { 581 schedule_work(&qp->link_cleanup); 582 } 583 584 static void ntb_transport_link_cleanup(struct work_struct *work) 585 { 586 struct ntb_transport *nt = container_of(work, struct ntb_transport, 587 link_cleanup); 588 int i; 589 590 if (nt->transport_link == NTB_LINK_DOWN) 591 cancel_delayed_work_sync(&nt->link_work); 592 else 593 nt->transport_link = NTB_LINK_DOWN; 594 595 /* Pass along the info to any clients */ 596 for (i = 0; i < nt->max_qps; i++) 597 if (!test_bit(i, &nt->qp_bitmap)) 598 ntb_qp_link_down(&nt->qps[i]); 599 600 /* The scratchpad registers keep the values if the remote side 601 * goes down, blast them now to give them a sane value the next 602 * time they are accessed 603 */ 604 for (i = 0; i < MAX_SPAD; i++) 605 ntb_write_local_spad(nt->ndev, i, 0); 606 } 607 608 static void ntb_transport_event_callback(void *data, enum ntb_hw_event event) 609 { 610 struct ntb_transport *nt = data; 611 612 switch (event) { 613 case NTB_EVENT_HW_LINK_UP: 614 schedule_delayed_work(&nt->link_work, 0); 615 break; 616 case NTB_EVENT_HW_LINK_DOWN: 617 schedule_work(&nt->link_cleanup); 618 break; 619 default: 620 BUG(); 621 } 622 } 623 624 static void ntb_transport_link_work(struct work_struct *work) 625 { 626 struct ntb_transport *nt = container_of(work, struct ntb_transport, 627 link_work.work); 628 struct ntb_device *ndev = nt->ndev; 629 struct pci_dev *pdev = ntb_query_pdev(ndev); 630 u32 val; 631 int rc, i; 632 633 /* send the local info, in the opposite order of the way we read it */ 634 for (i = 0; i < NTB_NUM_MW; i++) { 635 rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), 636 ntb_get_mw_size(ndev, i) >> 32); 637 if (rc) { 638 dev_err(&pdev->dev, "Error writing %u to remote spad %d\n", 639 (u32)(ntb_get_mw_size(ndev, i) >> 32), 640 MW0_SZ_HIGH + (i * 2)); 641 goto out; 642 } 643 644 rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2), 645 (u32) ntb_get_mw_size(ndev, i)); 646 if (rc) { 647 dev_err(&pdev->dev, "Error writing %u to remote spad %d\n", 648 (u32) ntb_get_mw_size(ndev, i), 649 MW0_SZ_LOW + (i * 2)); 650 goto out; 651 } 652 } 653 654 rc = ntb_write_remote_spad(ndev, NUM_MWS, NTB_NUM_MW); 655 if (rc) { 656 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", 657 NTB_NUM_MW, NUM_MWS); 658 goto out; 659 } 660 661 rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps); 662 if (rc) { 663 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", 664 nt->max_qps, NUM_QPS); 665 goto out; 666 } 667 668 rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION); 669 if (rc) { 670 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", 671 NTB_TRANSPORT_VERSION, VERSION); 672 goto out; 673 } 674 675 /* Query the remote side for its info */ 676 rc = ntb_read_remote_spad(ndev, VERSION, &val); 677 if (rc) { 678 dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION); 679 goto out; 680 } 681 682 if (val != NTB_TRANSPORT_VERSION) 683 goto out; 684 dev_dbg(&pdev->dev, "Remote version = %d\n", val); 685 686 rc = ntb_read_remote_spad(ndev, NUM_QPS, &val); 687 if (rc) { 688 dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS); 689 goto out; 690 } 691 692 if (val != nt->max_qps) 693 goto out; 694 dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val); 695 696 rc = ntb_read_remote_spad(ndev, NUM_MWS, &val); 697 if (rc) { 698 dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS); 699 goto out; 700 } 701 702 if (val != NTB_NUM_MW) 703 goto out; 704 dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val); 705 706 for (i = 0; i < NTB_NUM_MW; i++) { 707 u64 val64; 708 709 rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val); 710 if (rc) { 711 dev_err(&pdev->dev, "Error reading remote spad %d\n", 712 MW0_SZ_HIGH + (i * 2)); 713 goto out1; 714 } 715 716 val64 = (u64) val << 32; 717 718 rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val); 719 if (rc) { 720 dev_err(&pdev->dev, "Error reading remote spad %d\n", 721 MW0_SZ_LOW + (i * 2)); 722 goto out1; 723 } 724 725 val64 |= val; 726 727 dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64); 728 729 rc = ntb_set_mw(nt, i, val64); 730 if (rc) 731 goto out1; 732 } 733 734 nt->transport_link = NTB_LINK_UP; 735 736 for (i = 0; i < nt->max_qps; i++) { 737 struct ntb_transport_qp *qp = &nt->qps[i]; 738 739 ntb_transport_setup_qp_mw(nt, i); 740 741 if (qp->client_ready == NTB_LINK_UP) 742 schedule_delayed_work(&qp->link_work, 0); 743 } 744 745 return; 746 747 out1: 748 for (i = 0; i < NTB_NUM_MW; i++) 749 ntb_free_mw(nt, i); 750 out: 751 if (ntb_hw_link_status(ndev)) 752 schedule_delayed_work(&nt->link_work, 753 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT)); 754 } 755 756 static void ntb_qp_link_work(struct work_struct *work) 757 { 758 struct ntb_transport_qp *qp = container_of(work, 759 struct ntb_transport_qp, 760 link_work.work); 761 struct pci_dev *pdev = ntb_query_pdev(qp->ndev); 762 struct ntb_transport *nt = qp->transport; 763 int rc, val; 764 765 WARN_ON(nt->transport_link != NTB_LINK_UP); 766 767 rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val); 768 if (rc) { 769 dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS); 770 return; 771 } 772 773 rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num); 774 if (rc) 775 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", 776 val | 1 << qp->qp_num, QP_LINKS); 777 778 /* query remote spad for qp ready bits */ 779 rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val); 780 if (rc) 781 dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS); 782 783 dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val); 784 785 /* See if the remote side is up */ 786 if (1 << qp->qp_num & val) { 787 qp->qp_link = NTB_LINK_UP; 788 789 dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num); 790 if (qp->event_handler) 791 qp->event_handler(qp->cb_data, NTB_LINK_UP); 792 } else if (nt->transport_link == NTB_LINK_UP) 793 schedule_delayed_work(&qp->link_work, 794 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT)); 795 } 796 797 static void ntb_transport_init_queue(struct ntb_transport *nt, 798 unsigned int qp_num) 799 { 800 struct ntb_transport_qp *qp; 801 unsigned int num_qps_mw, tx_size; 802 u8 mw_num = QP_TO_MW(qp_num); 803 804 qp = &nt->qps[qp_num]; 805 qp->qp_num = qp_num; 806 qp->transport = nt; 807 qp->ndev = nt->ndev; 808 qp->qp_link = NTB_LINK_DOWN; 809 qp->client_ready = NTB_LINK_DOWN; 810 qp->event_handler = NULL; 811 812 if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW) 813 num_qps_mw = nt->max_qps / NTB_NUM_MW + 1; 814 else 815 num_qps_mw = nt->max_qps / NTB_NUM_MW; 816 817 tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw; 818 qp->rx_info = ntb_get_mw_vbase(nt->ndev, mw_num) + 819 (qp_num / NTB_NUM_MW * tx_size); 820 tx_size -= sizeof(struct ntb_rx_info); 821 822 qp->tx_mw = qp->rx_info + 1; 823 /* Due to housekeeping, there must be atleast 2 buffs */ 824 qp->tx_max_frame = min(transport_mtu, tx_size / 2); 825 qp->tx_max_entry = tx_size / qp->tx_max_frame; 826 827 if (nt->debugfs_dir) { 828 char debugfs_name[4]; 829 830 snprintf(debugfs_name, 4, "qp%d", qp_num); 831 qp->debugfs_dir = debugfs_create_dir(debugfs_name, 832 nt->debugfs_dir); 833 834 qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR, 835 qp->debugfs_dir, qp, 836 &ntb_qp_debugfs_stats); 837 } 838 839 INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work); 840 INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup); 841 842 spin_lock_init(&qp->ntb_rx_pend_q_lock); 843 spin_lock_init(&qp->ntb_rx_free_q_lock); 844 spin_lock_init(&qp->ntb_tx_free_q_lock); 845 846 INIT_LIST_HEAD(&qp->rx_pend_q); 847 INIT_LIST_HEAD(&qp->rx_free_q); 848 INIT_LIST_HEAD(&qp->tx_free_q); 849 } 850 851 int ntb_transport_init(struct pci_dev *pdev) 852 { 853 struct ntb_transport *nt; 854 int rc, i; 855 856 nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL); 857 if (!nt) 858 return -ENOMEM; 859 860 if (debugfs_initialized()) 861 nt->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); 862 else 863 nt->debugfs_dir = NULL; 864 865 nt->ndev = ntb_register_transport(pdev, nt); 866 if (!nt->ndev) { 867 rc = -EIO; 868 goto err; 869 } 870 871 nt->max_qps = min(nt->ndev->max_cbs, max_num_clients); 872 873 nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp), 874 GFP_KERNEL); 875 if (!nt->qps) { 876 rc = -ENOMEM; 877 goto err1; 878 } 879 880 nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1; 881 882 for (i = 0; i < nt->max_qps; i++) 883 ntb_transport_init_queue(nt, i); 884 885 INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work); 886 INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup); 887 888 rc = ntb_register_event_callback(nt->ndev, 889 ntb_transport_event_callback); 890 if (rc) 891 goto err2; 892 893 INIT_LIST_HEAD(&nt->client_devs); 894 rc = ntb_bus_init(nt); 895 if (rc) 896 goto err3; 897 898 if (ntb_hw_link_status(nt->ndev)) 899 schedule_delayed_work(&nt->link_work, 0); 900 901 return 0; 902 903 err3: 904 ntb_unregister_event_callback(nt->ndev); 905 err2: 906 kfree(nt->qps); 907 err1: 908 ntb_unregister_transport(nt->ndev); 909 err: 910 debugfs_remove_recursive(nt->debugfs_dir); 911 kfree(nt); 912 return rc; 913 } 914 915 void ntb_transport_free(void *transport) 916 { 917 struct ntb_transport *nt = transport; 918 struct pci_dev *pdev; 919 int i; 920 921 nt->transport_link = NTB_LINK_DOWN; 922 923 /* verify that all the qp's are freed */ 924 for (i = 0; i < nt->max_qps; i++) 925 if (!test_bit(i, &nt->qp_bitmap)) 926 ntb_transport_free_queue(&nt->qps[i]); 927 928 ntb_bus_remove(nt); 929 930 cancel_delayed_work_sync(&nt->link_work); 931 932 debugfs_remove_recursive(nt->debugfs_dir); 933 934 ntb_unregister_event_callback(nt->ndev); 935 936 pdev = ntb_query_pdev(nt->ndev); 937 938 for (i = 0; i < NTB_NUM_MW; i++) 939 ntb_free_mw(nt, i); 940 941 kfree(nt->qps); 942 ntb_unregister_transport(nt->ndev); 943 kfree(nt); 944 } 945 946 static void ntb_rx_copy_task(struct ntb_transport_qp *qp, 947 struct ntb_queue_entry *entry, void *offset) 948 { 949 void *cb_data = entry->cb_data; 950 unsigned int len = entry->len; 951 952 memcpy(entry->buf, offset, entry->len); 953 954 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q); 955 956 if (qp->rx_handler && qp->client_ready == NTB_LINK_UP) 957 qp->rx_handler(qp, qp->cb_data, cb_data, len); 958 } 959 960 static int ntb_process_rxc(struct ntb_transport_qp *qp) 961 { 962 struct ntb_payload_header *hdr; 963 struct ntb_queue_entry *entry; 964 void *offset; 965 966 offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index; 967 hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header); 968 969 entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q); 970 if (!entry) { 971 dev_dbg(&ntb_query_pdev(qp->ndev)->dev, 972 "no buffer - HDR ver %u, len %d, flags %x\n", 973 hdr->ver, hdr->len, hdr->flags); 974 qp->rx_err_no_buf++; 975 return -ENOMEM; 976 } 977 978 if (!(hdr->flags & DESC_DONE_FLAG)) { 979 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, 980 &qp->rx_pend_q); 981 qp->rx_ring_empty++; 982 return -EAGAIN; 983 } 984 985 if (hdr->ver != (u32) qp->rx_pkts) { 986 dev_dbg(&ntb_query_pdev(qp->ndev)->dev, 987 "qp %d: version mismatch, expected %llu - got %u\n", 988 qp->qp_num, qp->rx_pkts, hdr->ver); 989 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, 990 &qp->rx_pend_q); 991 qp->rx_err_ver++; 992 return -EIO; 993 } 994 995 if (hdr->flags & LINK_DOWN_FLAG) { 996 ntb_qp_link_down(qp); 997 998 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, 999 &qp->rx_pend_q); 1000 goto out; 1001 } 1002 1003 dev_dbg(&ntb_query_pdev(qp->ndev)->dev, 1004 "rx offset %u, ver %u - %d payload received, buf size %d\n", 1005 qp->rx_index, hdr->ver, hdr->len, entry->len); 1006 1007 if (hdr->len <= entry->len) { 1008 entry->len = hdr->len; 1009 ntb_rx_copy_task(qp, entry, offset); 1010 } else { 1011 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, 1012 &qp->rx_pend_q); 1013 1014 qp->rx_err_oflow++; 1015 dev_dbg(&ntb_query_pdev(qp->ndev)->dev, 1016 "RX overflow! Wanted %d got %d\n", 1017 hdr->len, entry->len); 1018 } 1019 1020 qp->rx_bytes += hdr->len; 1021 qp->rx_pkts++; 1022 1023 out: 1024 /* Ensure that the data is fully copied out before clearing the flag */ 1025 wmb(); 1026 hdr->flags = 0; 1027 iowrite32(qp->rx_index, &qp->rx_info->entry); 1028 1029 qp->rx_index++; 1030 qp->rx_index %= qp->rx_max_entry; 1031 1032 return 0; 1033 } 1034 1035 static void ntb_transport_rx(unsigned long data) 1036 { 1037 struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data; 1038 int rc, i; 1039 1040 /* Limit the number of packets processed in a single interrupt to 1041 * provide fairness to others 1042 */ 1043 for (i = 0; i < qp->rx_max_entry; i++) { 1044 rc = ntb_process_rxc(qp); 1045 if (rc) 1046 break; 1047 } 1048 } 1049 1050 static void ntb_transport_rxc_db(void *data, int db_num) 1051 { 1052 struct ntb_transport_qp *qp = data; 1053 1054 dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n", 1055 __func__, db_num); 1056 1057 tasklet_schedule(&qp->rx_work); 1058 } 1059 1060 static void ntb_tx_copy_task(struct ntb_transport_qp *qp, 1061 struct ntb_queue_entry *entry, 1062 void __iomem *offset) 1063 { 1064 struct ntb_payload_header __iomem *hdr; 1065 1066 memcpy_toio(offset, entry->buf, entry->len); 1067 1068 hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header); 1069 iowrite32(entry->len, &hdr->len); 1070 iowrite32((u32) qp->tx_pkts, &hdr->ver); 1071 1072 /* Ensure that the data is fully copied out before setting the flag */ 1073 wmb(); 1074 iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags); 1075 1076 ntb_ring_sdb(qp->ndev, qp->qp_num); 1077 1078 /* The entry length can only be zero if the packet is intended to be a 1079 * "link down" or similar. Since no payload is being sent in these 1080 * cases, there is nothing to add to the completion queue. 1081 */ 1082 if (entry->len > 0) { 1083 qp->tx_bytes += entry->len; 1084 1085 if (qp->tx_handler) 1086 qp->tx_handler(qp, qp->cb_data, entry->cb_data, 1087 entry->len); 1088 } 1089 1090 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q); 1091 } 1092 1093 static int ntb_process_tx(struct ntb_transport_qp *qp, 1094 struct ntb_queue_entry *entry) 1095 { 1096 void __iomem *offset; 1097 1098 offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index; 1099 1100 dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - offset %p, tx %u, entry len %d flags %x buff %p\n", 1101 qp->tx_pkts, offset, qp->tx_index, entry->len, entry->flags, 1102 entry->buf); 1103 if (qp->tx_index == qp->remote_rx_info->entry) { 1104 qp->tx_ring_full++; 1105 return -EAGAIN; 1106 } 1107 1108 if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) { 1109 if (qp->tx_handler) 1110 qp->tx_handler(qp->cb_data, qp, NULL, -EIO); 1111 1112 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, 1113 &qp->tx_free_q); 1114 return 0; 1115 } 1116 1117 ntb_tx_copy_task(qp, entry, offset); 1118 1119 qp->tx_index++; 1120 qp->tx_index %= qp->tx_max_entry; 1121 1122 qp->tx_pkts++; 1123 1124 return 0; 1125 } 1126 1127 static void ntb_send_link_down(struct ntb_transport_qp *qp) 1128 { 1129 struct pci_dev *pdev = ntb_query_pdev(qp->ndev); 1130 struct ntb_queue_entry *entry; 1131 int i, rc; 1132 1133 if (qp->qp_link == NTB_LINK_DOWN) 1134 return; 1135 1136 qp->qp_link = NTB_LINK_DOWN; 1137 dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num); 1138 1139 for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) { 1140 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); 1141 if (entry) 1142 break; 1143 msleep(100); 1144 } 1145 1146 if (!entry) 1147 return; 1148 1149 entry->cb_data = NULL; 1150 entry->buf = NULL; 1151 entry->len = 0; 1152 entry->flags = LINK_DOWN_FLAG; 1153 1154 rc = ntb_process_tx(qp, entry); 1155 if (rc) 1156 dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n", 1157 qp->qp_num); 1158 } 1159 1160 /** 1161 * ntb_transport_create_queue - Create a new NTB transport layer queue 1162 * @rx_handler: receive callback function 1163 * @tx_handler: transmit callback function 1164 * @event_handler: event callback function 1165 * 1166 * Create a new NTB transport layer queue and provide the queue with a callback 1167 * routine for both transmit and receive. The receive callback routine will be 1168 * used to pass up data when the transport has received it on the queue. The 1169 * transmit callback routine will be called when the transport has completed the 1170 * transmission of the data on the queue and the data is ready to be freed. 1171 * 1172 * RETURNS: pointer to newly created ntb_queue, NULL on error. 1173 */ 1174 struct ntb_transport_qp * 1175 ntb_transport_create_queue(void *data, struct pci_dev *pdev, 1176 const struct ntb_queue_handlers *handlers) 1177 { 1178 struct ntb_queue_entry *entry; 1179 struct ntb_transport_qp *qp; 1180 struct ntb_transport *nt; 1181 unsigned int free_queue; 1182 int rc, i; 1183 1184 nt = ntb_find_transport(pdev); 1185 if (!nt) 1186 goto err; 1187 1188 free_queue = ffs(nt->qp_bitmap); 1189 if (!free_queue) 1190 goto err; 1191 1192 /* decrement free_queue to make it zero based */ 1193 free_queue--; 1194 1195 clear_bit(free_queue, &nt->qp_bitmap); 1196 1197 qp = &nt->qps[free_queue]; 1198 qp->cb_data = data; 1199 qp->rx_handler = handlers->rx_handler; 1200 qp->tx_handler = handlers->tx_handler; 1201 qp->event_handler = handlers->event_handler; 1202 1203 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { 1204 entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC); 1205 if (!entry) 1206 goto err1; 1207 1208 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, 1209 &qp->rx_free_q); 1210 } 1211 1212 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { 1213 entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC); 1214 if (!entry) 1215 goto err2; 1216 1217 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, 1218 &qp->tx_free_q); 1219 } 1220 1221 tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp); 1222 1223 rc = ntb_register_db_callback(qp->ndev, free_queue, qp, 1224 ntb_transport_rxc_db); 1225 if (rc) 1226 goto err3; 1227 1228 dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num); 1229 1230 return qp; 1231 1232 err3: 1233 tasklet_disable(&qp->rx_work); 1234 err2: 1235 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) 1236 kfree(entry); 1237 err1: 1238 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) 1239 kfree(entry); 1240 set_bit(free_queue, &nt->qp_bitmap); 1241 err: 1242 return NULL; 1243 } 1244 EXPORT_SYMBOL_GPL(ntb_transport_create_queue); 1245 1246 /** 1247 * ntb_transport_free_queue - Frees NTB transport queue 1248 * @qp: NTB queue to be freed 1249 * 1250 * Frees NTB transport queue 1251 */ 1252 void ntb_transport_free_queue(struct ntb_transport_qp *qp) 1253 { 1254 struct pci_dev *pdev; 1255 struct ntb_queue_entry *entry; 1256 1257 if (!qp) 1258 return; 1259 1260 pdev = ntb_query_pdev(qp->ndev); 1261 1262 cancel_delayed_work_sync(&qp->link_work); 1263 1264 ntb_unregister_db_callback(qp->ndev, qp->qp_num); 1265 tasklet_disable(&qp->rx_work); 1266 1267 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) 1268 kfree(entry); 1269 1270 while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) { 1271 dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n"); 1272 kfree(entry); 1273 } 1274 1275 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) 1276 kfree(entry); 1277 1278 set_bit(qp->qp_num, &qp->transport->qp_bitmap); 1279 1280 dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num); 1281 } 1282 EXPORT_SYMBOL_GPL(ntb_transport_free_queue); 1283 1284 /** 1285 * ntb_transport_rx_remove - Dequeues enqueued rx packet 1286 * @qp: NTB queue to be freed 1287 * @len: pointer to variable to write enqueued buffers length 1288 * 1289 * Dequeues unused buffers from receive queue. Should only be used during 1290 * shutdown of qp. 1291 * 1292 * RETURNS: NULL error value on error, or void* for success. 1293 */ 1294 void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len) 1295 { 1296 struct ntb_queue_entry *entry; 1297 void *buf; 1298 1299 if (!qp || qp->client_ready == NTB_LINK_UP) 1300 return NULL; 1301 1302 entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q); 1303 if (!entry) 1304 return NULL; 1305 1306 buf = entry->cb_data; 1307 *len = entry->len; 1308 1309 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q); 1310 1311 return buf; 1312 } 1313 EXPORT_SYMBOL_GPL(ntb_transport_rx_remove); 1314 1315 /** 1316 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry 1317 * @qp: NTB transport layer queue the entry is to be enqueued on 1318 * @cb: per buffer pointer for callback function to use 1319 * @data: pointer to data buffer that incoming packets will be copied into 1320 * @len: length of the data buffer 1321 * 1322 * Enqueue a new receive buffer onto the transport queue into which a NTB 1323 * payload can be received into. 1324 * 1325 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 1326 */ 1327 int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, 1328 unsigned int len) 1329 { 1330 struct ntb_queue_entry *entry; 1331 1332 if (!qp) 1333 return -EINVAL; 1334 1335 entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q); 1336 if (!entry) 1337 return -ENOMEM; 1338 1339 entry->cb_data = cb; 1340 entry->buf = data; 1341 entry->len = len; 1342 1343 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q); 1344 1345 return 0; 1346 } 1347 EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue); 1348 1349 /** 1350 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry 1351 * @qp: NTB transport layer queue the entry is to be enqueued on 1352 * @cb: per buffer pointer for callback function to use 1353 * @data: pointer to data buffer that will be sent 1354 * @len: length of the data buffer 1355 * 1356 * Enqueue a new transmit buffer onto the transport queue from which a NTB 1357 * payload will be transmitted. This assumes that a lock is behing held to 1358 * serialize access to the qp. 1359 * 1360 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 1361 */ 1362 int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, 1363 unsigned int len) 1364 { 1365 struct ntb_queue_entry *entry; 1366 int rc; 1367 1368 if (!qp || qp->qp_link != NTB_LINK_UP || !len) 1369 return -EINVAL; 1370 1371 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); 1372 if (!entry) 1373 return -ENOMEM; 1374 1375 entry->cb_data = cb; 1376 entry->buf = data; 1377 entry->len = len; 1378 entry->flags = 0; 1379 1380 rc = ntb_process_tx(qp, entry); 1381 if (rc) 1382 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, 1383 &qp->tx_free_q); 1384 1385 return rc; 1386 } 1387 EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue); 1388 1389 /** 1390 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue 1391 * @qp: NTB transport layer queue to be enabled 1392 * 1393 * Notify NTB transport layer of client readiness to use queue 1394 */ 1395 void ntb_transport_link_up(struct ntb_transport_qp *qp) 1396 { 1397 if (!qp) 1398 return; 1399 1400 qp->client_ready = NTB_LINK_UP; 1401 1402 if (qp->transport->transport_link == NTB_LINK_UP) 1403 schedule_delayed_work(&qp->link_work, 0); 1404 } 1405 EXPORT_SYMBOL_GPL(ntb_transport_link_up); 1406 1407 /** 1408 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data 1409 * @qp: NTB transport layer queue to be disabled 1410 * 1411 * Notify NTB transport layer of client's desire to no longer receive data on 1412 * transport queue specified. It is the client's responsibility to ensure all 1413 * entries on queue are purged or otherwise handled appropraitely. 1414 */ 1415 void ntb_transport_link_down(struct ntb_transport_qp *qp) 1416 { 1417 struct pci_dev *pdev; 1418 int rc, val; 1419 1420 if (!qp) 1421 return; 1422 1423 pdev = ntb_query_pdev(qp->ndev); 1424 qp->client_ready = NTB_LINK_DOWN; 1425 1426 rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val); 1427 if (rc) { 1428 dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS); 1429 return; 1430 } 1431 1432 rc = ntb_write_remote_spad(qp->ndev, QP_LINKS, 1433 val & ~(1 << qp->qp_num)); 1434 if (rc) 1435 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", 1436 val & ~(1 << qp->qp_num), QP_LINKS); 1437 1438 if (qp->qp_link == NTB_LINK_UP) 1439 ntb_send_link_down(qp); 1440 else 1441 cancel_delayed_work_sync(&qp->link_work); 1442 } 1443 EXPORT_SYMBOL_GPL(ntb_transport_link_down); 1444 1445 /** 1446 * ntb_transport_link_query - Query transport link state 1447 * @qp: NTB transport layer queue to be queried 1448 * 1449 * Query connectivity to the remote system of the NTB transport queue 1450 * 1451 * RETURNS: true for link up or false for link down 1452 */ 1453 bool ntb_transport_link_query(struct ntb_transport_qp *qp) 1454 { 1455 if (!qp) 1456 return false; 1457 1458 return qp->qp_link == NTB_LINK_UP; 1459 } 1460 EXPORT_SYMBOL_GPL(ntb_transport_link_query); 1461 1462 /** 1463 * ntb_transport_qp_num - Query the qp number 1464 * @qp: NTB transport layer queue to be queried 1465 * 1466 * Query qp number of the NTB transport queue 1467 * 1468 * RETURNS: a zero based number specifying the qp number 1469 */ 1470 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp) 1471 { 1472 if (!qp) 1473 return 0; 1474 1475 return qp->qp_num; 1476 } 1477 EXPORT_SYMBOL_GPL(ntb_transport_qp_num); 1478 1479 /** 1480 * ntb_transport_max_size - Query the max payload size of a qp 1481 * @qp: NTB transport layer queue to be queried 1482 * 1483 * Query the maximum payload size permissible on the given qp 1484 * 1485 * RETURNS: the max payload size of a qp 1486 */ 1487 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp) 1488 { 1489 if (!qp) 1490 return 0; 1491 1492 return qp->tx_max_frame - sizeof(struct ntb_payload_header); 1493 } 1494 EXPORT_SYMBOL_GPL(ntb_transport_max_size); 1495