/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/ntb.h>
#include "ntb_hw.h"

#define NTB_TRANSPORT_VERSION	1

static unsigned int transport_mtu = 0x401E;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients = 2;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;
};

struct ntb_transport_qp {
	struct ntb_transport *transport;
	struct ntb_device *ndev;
	void *cb_data;

	bool client_ready;
	bool qp_link;
	u8 qp_num;	/* Only 64 QPs are allowed.  0-63 */

	void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
			    void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void *tx_mw_begin;
	void *tx_mw_end;
	void *tx_offset;
	unsigned int tx_max_frame;

	void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
			    void *data, int len);
	struct tasklet_struct rx_work;
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	spinlock_t ntb_rx_pend_q_lock;
	spinlock_t ntb_rx_free_q_lock;
	void *rx_buff_begin;
	void *rx_buff_end;
	void *rx_offset;
	unsigned int rx_max_frame;

	void (*event_handler) (void *data, int status);
	struct delayed_work link_work;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
};

struct ntb_transport_mw {
	size_t size;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
	struct list_head entry;
	struct device dev;
};

struct ntb_transport {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_device *ndev;
	struct ntb_transport_mw mw[NTB_NUM_MW];
	struct ntb_transport_qp *qps;
	unsigned int max_qps;
	unsigned long qp_bitmap;
	bool transport_link;
	struct delayed_work link_work;
	struct dentry *debugfs_dir;
};

enum {
	DESC_DONE_FLAG = 1 << 0,
	LINK_DOWN_FLAG = 1 << 1,
};

struct ntb_payload_header {
	u64 ver;
	unsigned int len;
	unsigned int flags;
};

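/*
 * Layout of the scratchpad registers used for the transport link-up
 * handshake: each side publishes its transport version, memory window
 * sizes, and qp count, and QP_LINKS carries the per-qp ready bits.
 */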
enum {
	VERSION = 0,
	MW0_SZ,
	MW1_SZ,
	NUM_QPS,
	QP_LINKS,
	MAX_SPAD,
};

#define QP_TO_MW(qp)		((qp) % NTB_NUM_MW)
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10
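
/*
 * Illustrative note: qps are striped across the memory windows, e.g. with
 * NTB_NUM_MW == 2, QP_TO_MW() maps qps 0, 2, 4, ... to MW0 and qps
 * 1, 3, 5, ... to MW1.
 */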

static int ntb_match_bus(struct device *dev, struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_client_probe(struct device *dev)
{
	const struct ntb_client *drv = container_of(dev->driver,
						    struct ntb_client, driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
	int rc = -EINVAL;

	get_device(dev);
	if (drv && drv->probe)
		rc = drv->probe(pdev);
	if (rc)
		put_device(dev);

	return rc;
}

static int ntb_client_remove(struct device *dev)
{
	const struct ntb_client *drv = container_of(dev->driver,
						    struct ntb_client, driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);

	if (drv && drv->remove)
		drv->remove(pdev);

	put_device(dev);

	return 0;
}

struct bus_type ntb_bus_type = {
	.name = "ntb_bus",
	.match = ntb_match_bus,
	.probe = ntb_client_probe,
	.remove = ntb_client_remove,
};

static LIST_HEAD(ntb_transport_list);

static int ntb_bus_init(struct ntb_transport *nt)
{
	if (list_empty(&ntb_transport_list)) {
		int rc = bus_register(&ntb_bus_type);
		if (rc)
			return rc;
	}

	list_add(&nt->entry, &ntb_transport_list);

	return 0;
}

static void ntb_bus_remove(struct ntb_transport *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);

	if (list_empty(&ntb_transport_list))
		bus_unregister(&ntb_bus_type);
}

static void ntb_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;

	client_dev = container_of(dev, struct ntb_transport_client_dev, dev);
	kfree(client_dev);
}

/**
 * ntb_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);

/**
 * ntb_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport *nt;
	int rc;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	list_for_each_entry(nt, &ntb_transport_list, entry) {
		struct device *dev;

		client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
				     GFP_KERNEL);
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s", device_name);
		dev->bus = &ntb_bus_type;
		dev->release = ntb_client_release;
		dev->parent = &ntb_query_pdev(nt->ndev)->dev;

		rc = device_register(dev);
		if (rc) {
			kfree(client_dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
	}

	return 0;

err:
	ntb_unregister_client_dev(device_name);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_register_client_dev);

/**
 * ntb_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_register_client(struct ntb_client *drv)
{
	drv->driver.bus = &ntb_bus_type;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_register_client);
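
/*
 * Illustrative sketch (not part of this driver): a minimal client pairs a
 * driver registration with a matching named device, since ntb_match_bus()
 * compares the device name against drv->name.  "my_probe" and "my_remove"
 * are hypothetical client callbacks:
 *
 *	static struct ntb_client my_client = {
 *		.driver		= { .name = "ntb_my_client" },
 *		.probe		= my_probe,
 *		.remove		= my_remove,
 *	};
 *
 *	rc = ntb_register_client(&my_client);
 *	...
 *	rc = ntb_register_client_dev("ntb_my_client");
 */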

/**
 * ntb_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 */
void ntb_unregister_client(struct ntb_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_unregister_client);

static int debugfs_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char buf[1024];
	ssize_t ret, out_offset, out_count;

	out_count = 1024;

	qp = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "NTB QP stats\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff_begin - %p\n", qp->rx_buff_begin);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_offset - \t%p\n", qp->rx_offset);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff_end - \t%p\n", qp->rx_buff_end);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw_begin - \t%p\n", qp->tx_mw_begin);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_offset - \t%p\n", qp->tx_offset);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw_end - \t%p\n", qp->tx_mw_end);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "QP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
			       "Up" : "Down");

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = debugfs_open,
	.read = debugfs_read,
};

static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (list_empty(list)) {
		entry = NULL;
		goto out;
	}
	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_del(&entry->entry);
out:
	spin_unlock_irqrestore(lock, flags);

	return entry;
}

static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
				      unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qps[qp_num];
	unsigned int rx_size, num_qps_mw;
	u8 mw_num = QP_TO_MW(qp_num);
	void *offset;

	WARN_ON(!nt->mw[mw_num].virt_addr);

	if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
		num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
	else
		num_qps_mw = nt->max_qps / NTB_NUM_MW;

	rx_size = nt->mw[mw_num].size / num_qps_mw;
	qp->rx_buff_begin = nt->mw[mw_num].virt_addr +
			    (qp_num / NTB_NUM_MW * rx_size);
	qp->rx_buff_end = qp->rx_buff_begin + rx_size;
	qp->rx_offset = qp->rx_buff_begin;
	qp->rx_max_frame = min(transport_mtu, rx_size);

	/* setup the hdr offsets with 0's */
	for (offset = qp->rx_buff_begin + qp->rx_max_frame -
		      sizeof(struct ntb_payload_header);
	     offset < qp->rx_buff_end; offset += qp->rx_max_frame)
		memset(offset, 0, sizeof(struct ntb_payload_header));

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
}

static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	/* Alloc memory for receiving data.  Must be 4k aligned */
	mw->size = ALIGN(size, 4096);

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
					   GFP_KERNEL);
	if (!mw->virt_addr) {
		dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
			(int) mw->size);
		return -ENOMEM;
	}

	/* Notify HW the memory location of the receive buffer */
	ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);

	return 0;
}

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	struct ntb_transport *nt = qp->transport;
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	if (qp->qp_link == NTB_LINK_DOWN) {
		cancel_delayed_work_sync(&qp->link_work);
		return;
	}

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, NTB_LINK_DOWN);

	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
	qp->qp_link = NTB_LINK_DOWN;

	if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_transport_conn_down(struct ntb_transport *nt)
{
	int i;

	if (nt->transport_link == NTB_LINK_DOWN)
		cancel_delayed_work_sync(&nt->link_work);
	else
		nt->transport_link = NTB_LINK_DOWN;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->max_qps; i++)
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_qp_link_down(&nt->qps[i]);

	/* The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < MAX_SPAD; i++)
		ntb_write_local_spad(nt->ndev, i, 0);
}

static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
{
	struct ntb_transport *nt = data;

	switch (event) {
	case NTB_EVENT_HW_LINK_UP:
		schedule_delayed_work(&nt->link_work, 0);
		break;
	case NTB_EVENT_HW_LINK_DOWN:
		ntb_transport_conn_down(nt);
		break;
	default:
		BUG();
	}
}

static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport *nt = container_of(work, struct ntb_transport,
						link_work.work);
	struct ntb_device *ndev = nt->ndev;
	struct pci_dev *pdev = ntb_query_pdev(ndev);
	u32 val;
	int rc, i;

	/* send the local info */
	rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			NTB_TRANSPORT_VERSION, VERSION);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, MW0_SZ, ntb_get_mw_size(ndev, 0));
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			(u32) ntb_get_mw_size(ndev, 0), MW0_SZ);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, MW1_SZ, ntb_get_mw_size(ndev, 1));
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			(u32) ntb_get_mw_size(ndev, 1), MW1_SZ);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			nt->max_qps, NUM_QPS);
		goto out;
	}

	rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, QP_LINKS, val);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val, QP_LINKS);
		goto out;
	}

	/* Query the remote side for its info */
	rc = ntb_read_remote_spad(ndev, VERSION, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
		goto out;
	}

	if (val != NTB_TRANSPORT_VERSION)
		goto out;
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);

	rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
		goto out;
	}

	if (val != nt->max_qps)
		goto out;
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);

	rc = ntb_read_remote_spad(ndev, MW0_SZ, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", MW0_SZ);
		goto out;
	}

	if (!val)
		goto out;
	dev_dbg(&pdev->dev, "Remote MW0 size = %d\n", val);

	rc = ntb_set_mw(nt, 0, val);
	if (rc)
		goto out;

	rc = ntb_read_remote_spad(ndev, MW1_SZ, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", MW1_SZ);
		goto out;
	}

	if (!val)
		goto out;
	dev_dbg(&pdev->dev, "Remote MW1 size = %d\n", val);

	rc = ntb_set_mw(nt, 1, val);
	if (rc)
		goto out;

	nt->transport_link = NTB_LINK_UP;

	for (i = 0; i < nt->max_qps; i++) {
		struct ntb_transport_qp *qp = &nt->qps[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready == NTB_LINK_UP)
			schedule_delayed_work(&qp->link_work, 0);
	}

	return;

out:
	if (ntb_hw_link_status(ndev))
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_work.work);
	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
	struct ntb_transport *nt = qp->transport;
	int rc, val;

	WARN_ON(nt->transport_link != NTB_LINK_UP);

	rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		return;
	}

	rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
	if (rc)
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val | 1 << qp->qp_num, QP_LINKS);

	/* query remote spad for qp ready bits */
	rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
	if (rc)
		dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);

	dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (1 << qp->qp_num & val) {
		qp->qp_link = NTB_LINK_UP;

		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		if (qp->event_handler)
			qp->event_handler(qp->cb_data, NTB_LINK_UP);
	} else if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_transport_init_queue(struct ntb_transport *nt,
				     unsigned int qp_num)
{
	struct ntb_transport_qp *qp;
	unsigned int num_qps_mw, tx_size;
	u8 mw_num = QP_TO_MW(qp_num);

	qp = &nt->qps[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ndev = nt->ndev;
	qp->qp_link = NTB_LINK_DOWN;
	qp->client_ready = NTB_LINK_DOWN;
	qp->event_handler = NULL;

	if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
		num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
	else
		num_qps_mw = nt->max_qps / NTB_NUM_MW;

	tx_size = ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
	qp->tx_mw_begin = ntb_get_mw_vbase(nt->ndev, mw_num) +
			  (qp_num / NTB_NUM_MW * tx_size);
	qp->tx_mw_end = qp->tx_mw_begin + tx_size;
	qp->tx_offset = qp->tx_mw_begin;
	qp->tx_max_frame = min(transport_mtu, tx_size);

	if (nt->debugfs_dir) {
		char debugfs_name[8];

		snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
						     nt->debugfs_dir);

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
							qp->debugfs_dir, qp,
							&ntb_qp_debugfs_stats);
	}

	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);

	spin_lock_init(&qp->ntb_rx_pend_q_lock);
	spin_lock_init(&qp->ntb_rx_free_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);
}

int ntb_transport_init(struct pci_dev *pdev)
{
	struct ntb_transport *nt;
	int rc, i;

	nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
	if (!nt)
		return -ENOMEM;

	if (debugfs_initialized())
		nt->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
	else
		nt->debugfs_dir = NULL;

	nt->ndev = ntb_register_transport(pdev, nt);
	if (!nt->ndev) {
		rc = -EIO;
		goto err;
	}

	nt->max_qps = min(nt->ndev->max_cbs, max_num_clients);

	nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
			  GFP_KERNEL);
	if (!nt->qps) {
		rc = -ENOMEM;
		goto err1;
	}

	nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;

	for (i = 0; i < nt->max_qps; i++)
		ntb_transport_init_queue(nt, i);

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);

	rc = ntb_register_event_callback(nt->ndev,
					 ntb_transport_event_callback);
	if (rc)
		goto err2;

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);
	if (rc)
		goto err3;

	if (ntb_hw_link_status(nt->ndev))
		schedule_delayed_work(&nt->link_work, 0);

	return 0;

err3:
	ntb_unregister_event_callback(nt->ndev);
err2:
	kfree(nt->qps);
err1:
	ntb_unregister_transport(nt->ndev);
err:
	debugfs_remove_recursive(nt->debugfs_dir);
	kfree(nt);
	return rc;
}

void ntb_transport_free(void *transport)
{
	struct ntb_transport *nt = transport;
	struct pci_dev *pdev;
	int i;

	nt->transport_link = NTB_LINK_DOWN;

	/* Free any queue pairs the clients did not already free */
	for (i = 0; i < nt->max_qps; i++)
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_transport_free_queue(&nt->qps[i]);

	ntb_bus_remove(nt);

	cancel_delayed_work_sync(&nt->link_work);

	debugfs_remove_recursive(nt->debugfs_dir);

	ntb_unregister_event_callback(nt->ndev);

	pdev = ntb_query_pdev(nt->ndev);

	for (i = 0; i < NTB_NUM_MW; i++)
		if (nt->mw[i].virt_addr)
			dma_free_coherent(&pdev->dev, nt->mw[i].size,
					  nt->mw[i].virt_addr,
					  nt->mw[i].dma_addr);

	kfree(nt->qps);
	ntb_unregister_transport(nt->ndev);
	kfree(nt);
}

static void ntb_rx_copy_task(struct ntb_transport_qp *qp,
			     struct ntb_queue_entry *entry, void *offset)
{
	struct ntb_payload_header *hdr;

	BUG_ON(offset < qp->rx_buff_begin ||
	       offset + qp->rx_max_frame >= qp->rx_buff_end);

	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
	entry->len = hdr->len;

	memcpy(entry->buf, offset, entry->len);

	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();
	hdr->flags = 0;

	if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
		qp->rx_handler(qp, qp->cb_data, entry->cb_data, entry->len);

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
}

static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry) {
		/* peek at the header in place; offset is not yet set here */
		hdr = qp->rx_offset + qp->rx_max_frame -
		      sizeof(struct ntb_payload_header);
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"no buffer - HDR ver %llu, len %d, flags %x\n",
			hdr->ver, hdr->len, hdr->flags);
		qp->rx_err_no_buf++;
		return -ENOMEM;
	}

	offset = qp->rx_offset;
	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		qp->rx_ring_empty++;
		return -EAGAIN;
	}

	if (hdr->ver != qp->rx_pkts) {
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"qp %d: version mismatch, expected %llu - got %llu\n",
			qp->qp_num, qp->rx_pkts, hdr->ver);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		qp->rx_err_ver++;
		return -EIO;
	}

	if (hdr->flags & LINK_DOWN_FLAG) {
		ntb_qp_link_down(qp);

		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);

		/* Ensure that the data is fully copied out before clearing the
		 * done flag
		 */
		wmb();
		hdr->flags = 0;
		goto out;
	}

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
		"rx offset %p, ver %llu - %d payload received, buf size %d\n",
		qp->rx_offset, hdr->ver, hdr->len, entry->len);

	if (hdr->len <= entry->len) {
		ntb_rx_copy_task(qp, entry, offset);
	} else {
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);

		/* Ensure that the data is fully copied out before clearing the
		 * done flag
		 */
		wmb();
		hdr->flags = 0;
		qp->rx_err_oflow++;
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"RX overflow! Wanted %d got %d\n",
			hdr->len, entry->len);
	}

	qp->rx_bytes += hdr->len;
	qp->rx_pkts++;

out:
	qp->rx_offset += qp->rx_max_frame;
	if (qp->rx_offset + qp->rx_max_frame >= qp->rx_buff_end)
		qp->rx_offset = qp->rx_buff_begin;

	return 0;
}

static void ntb_transport_rx(unsigned long data)
{
	struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
	int rc;

	do {
		rc = ntb_process_rxc(qp);
	} while (!rc);
}

static void ntb_transport_rxc_db(void *data, int db_num)
{
	struct ntb_transport_qp *qp = data;

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
		__func__, db_num);

	tasklet_schedule(&qp->rx_work);
}

static void ntb_tx_copy_task(struct ntb_transport_qp *qp,
			     struct ntb_queue_entry *entry,
			     void *offset)
{
	struct ntb_payload_header *hdr;

	BUG_ON(offset < qp->tx_mw_begin ||
	       offset + qp->tx_max_frame >= qp->tx_mw_end);

	memcpy_toio(offset, entry->buf, entry->len);

	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
	hdr->len = entry->len;
	hdr->ver = qp->tx_pkts;

	/* Ensure that the data is fully copied out before setting the flag */
	wmb();
	hdr->flags = entry->flags | DESC_DONE_FLAG;

	ntb_ring_sdb(qp->ndev, qp->qp_num);

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
				       entry->len);
	}

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}

static int ntb_process_tx(struct ntb_transport_qp *qp,
			  struct ntb_queue_entry *entry)
{
	struct ntb_payload_header *hdr;
	void *offset;

	offset = qp->tx_offset;
	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%llu - offset %p, tx %p, entry len %d flags %x buff %p\n",
		qp->tx_pkts, offset, qp->tx_offset, entry->len, entry->flags,
		entry->buf);
	if (hdr->flags) {
		qp->tx_ring_full++;
		return -EAGAIN;
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
		return 0;
	}

	ntb_tx_copy_task(qp, entry, offset);

	qp->tx_offset += qp->tx_max_frame;
	if (qp->tx_offset + qp->tx_max_frame >= qp->tx_mw_end)
		qp->tx_offset = qp->tx_mw_begin;

	qp->tx_pkts++;

	return 0;
}

static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
	struct ntb_queue_entry *entry;
	int i, rc;

	if (qp->qp_link == NTB_LINK_DOWN)
		return;

	qp->qp_link = NTB_LINK_DOWN;
	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry)
			break;
		msleep(100);
	}

	if (!entry)
		return;

	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = LINK_DOWN_FLAG;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
			qp->qp_num);
}

/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @data: pointer for callback data
 * @pdev: PCI device on which the NTB device resides
 * @handlers: pointer to the various NTB queue (callback) handlers
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive.  The receive callback routine will be
 * used to pass up data when the transport has received it on the queue.  The
 * transmit callback routine will be called when the transport has completed
 * the transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct pci_dev *pdev,
			   const struct ntb_queue_handlers *handlers)
{
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;
	struct ntb_transport *nt;
	unsigned int free_queue;
	int rc, i;

	nt = ntb_find_transport(pdev);
	if (!nt)
		goto err;

	free_queue = ffs(nt->qp_bitmap);
	if (!free_queue)
		goto err;

	/* decrement free_queue to make it zero based */
	free_queue--;

	clear_bit(free_queue, &nt->qp_bitmap);

	qp = &nt->qps[free_queue];
	qp->cb_data = data;
	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
		if (!entry)
			goto err1;

		ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
			     &qp->rx_free_q);
	}

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
		if (!entry)
			goto err2;

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
	}

	tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);

	rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
				      ntb_transport_rxc_db);
	if (rc)
		goto err3;

	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);

	return qp;

err3:
	tasklet_disable(&qp->rx_work);
err2:
	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);
err1:
	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		kfree(entry);
	set_bit(free_queue, &nt->qp_bitmap);
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
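
/*
 * Illustrative usage sketch (not part of this driver): "my_rx", "my_tx",
 * "my_event" and "priv" are hypothetical client-side names.  A client would
 * typically create the queue, post receive buffers, and then signal
 * readiness:
 *
 *	static const struct ntb_queue_handlers my_handlers = {
 *		.rx_handler	= my_rx,
 *		.tx_handler	= my_tx,
 *		.event_handler	= my_event,
 *	};
 *
 *	qp = ntb_transport_create_queue(priv, pdev, &my_handlers);
 *	if (!qp)
 *		goto err;
 *	...
 *	ntb_transport_link_up(qp);
 */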

/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees NTB transport queue
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	struct ntb_queue_entry *entry;

	if (!qp)
		return;

	pdev = ntb_query_pdev(qp->ndev);

	cancel_delayed_work_sync(&qp->link_work);

	ntb_unregister_db_callback(qp->ndev, qp->qp_num);
	tasklet_disable(&qp->rx_work);

	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		kfree(entry);

	while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
		dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);

	set_bit(qp->qp_num, &qp->transport->qp_bitmap);

	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);

/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB queue from which the entry is to be dequeued
 * @len: pointer to variable to write the dequeued buffer's length
 *
 * Dequeues an unused buffer from the receive queue.  Should only be used
 * during shutdown of the qp.
 *
 * RETURNS: a pointer to the dequeued buffer on success, or NULL on error.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
	struct ntb_queue_entry *entry;
	void *buf;

	if (!qp || qp->client_ready == NTB_LINK_UP)
		return NULL;

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry)
		return NULL;

	buf = entry->cb_data;
	*len = entry->len;

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	return buf;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);

/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue, into which an NTB
 * payload can be received.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;

	if (!qp)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
	if (!entry)
		return -ENOMEM;

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;

	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);

	return 0;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
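
/*
 * Illustrative sketch: receive buffers are client-owned; a client might post
 * a pool of them after creating the queue.  "NUM_BUFS" and "buf" are
 * hypothetical, and buffers should be at least ntb_transport_max_size(qp)
 * bytes to avoid rx overflows:
 *
 *	for (i = 0; i < NUM_BUFS; i++) {
 *		buf = kmalloc(ntb_transport_max_size(qp), GFP_KERNEL);
 *		if (!buf)
 *			break;
 *		rc = ntb_transport_rx_enqueue(qp, buf, buf,
 *					      ntb_transport_max_size(qp));
 *	}
 */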

/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	if (!qp || qp->qp_link != NTB_LINK_UP || !len)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (!entry)
		return -ENOMEM;

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
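
/*
 * Illustrative sketch: the data buffer must remain untouched until the
 * tx_handler callback reports completion.  A non-zero return (e.g. -EAGAIN
 * when the ring is full) typically means the client should back off and
 * retry later.  "ctx", "data" and "len" are hypothetical client values:
 *
 *	rc = ntb_transport_tx_enqueue(qp, ctx, data, len);
 *	if (rc)
 *		... stop the upper layer and retry later ...
 */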

/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
	if (!qp)
		return;

	qp->client_ready = NTB_LINK_UP;

	if (qp->transport->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);

/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * the transport queue specified.  It is the client's responsibility to ensure
 * all entries on the queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	int rc, val;

	if (!qp)
		return;

	pdev = ntb_query_pdev(qp->ndev);

	qp->client_ready = NTB_LINK_DOWN;

	rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		return;
	}

	rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
				   val & ~(1 << qp->qp_num));
	if (rc)
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val & ~(1 << qp->qp_num), QP_LINKS);

	if (qp->qp_link == NTB_LINK_UP)
		ntb_send_link_down(qp);
	else
		cancel_delayed_work_sync(&qp->link_work);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_down);

/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
	return qp->qp_link == NTB_LINK_UP;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);

/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero-based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
	return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);

/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
	return qp->tx_max_frame - sizeof(struct ntb_payload_header);
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);
1443