xref: /openbmc/linux/drivers/ntb/ntb_transport.c (revision d7237e22)
1 /*
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  *   redistributing this file, you may do so under either license.
4  *
5  *   GPL LICENSE SUMMARY
6  *
7  *   Copyright(c) 2012 Intel Corporation. All rights reserved.
8  *
9  *   This program is free software; you can redistribute it and/or modify
10  *   it under the terms of version 2 of the GNU General Public License as
11  *   published by the Free Software Foundation.
12  *
13  *   BSD LICENSE
14  *
15  *   Copyright(c) 2012 Intel Corporation. All rights reserved.
16  *
17  *   Redistribution and use in source and binary forms, with or without
18  *   modification, are permitted provided that the following conditions
19  *   are met:
20  *
21  *     * Redistributions of source code must retain the above copyright
22  *       notice, this list of conditions and the following disclaimer.
23  *     * Redistributions in binary form must reproduce the above copyright
24  *       notice, this list of conditions and the following disclaimer in
25  *       the documentation and/or other materials provided with the
26  *       distribution.
27  *     * Neither the name of Intel Corporation nor the names of its
28  *       contributors may be used to endorse or promote products derived
29  *       from this software without specific prior written permission.
30  *
31  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
34  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
36  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
37  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
39  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
41  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42  *
43  * Intel PCIe NTB Linux driver
44  *
45  * Contact Information:
46  * Jon Mason <jon.mason@intel.com>
47  */
48 #include <linux/debugfs.h>
49 #include <linux/delay.h>
50 #include <linux/dma-mapping.h>
51 #include <linux/errno.h>
52 #include <linux/export.h>
53 #include <linux/interrupt.h>
54 #include <linux/module.h>
55 #include <linux/pci.h>
56 #include <linux/slab.h>
57 #include <linux/types.h>
58 #include <linux/ntb.h>
59 #include "ntb_hw.h"
60 
61 #define NTB_TRANSPORT_VERSION	1
62 
63 static unsigned int transport_mtu = 0x401E;
64 module_param(transport_mtu, uint, 0644);
65 MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");
66 
67 static unsigned char max_num_clients = 2;
68 module_param(max_num_clients, byte, 0644);
69 MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");
70 
71 struct ntb_queue_entry {
72 	/* ntb_queue list reference */
73 	struct list_head entry;
74 	/* pointers to data to be transferred */
75 	void *cb_data;
76 	void *buf;
77 	unsigned int len;
78 	unsigned int flags;
79 };
80 
81 struct ntb_transport_qp {
82 	struct ntb_transport *transport;
83 	struct ntb_device *ndev;
84 	void *cb_data;
85 
86 	bool client_ready;
87 	bool qp_link;
88 	u8 qp_num;	/* Only 64 QPs are allowed.  0-63 */
89 
90 	void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
91 			    void *data, int len);
92 	struct list_head tx_free_q;
93 	spinlock_t ntb_tx_free_q_lock;
94 	void *tx_mw_begin;
95 	void *tx_mw_end;
96 	void *tx_offset;
97 	unsigned int tx_max_frame;
98 
99 	void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
100 			    void *data, int len);
101 	struct tasklet_struct rx_work;
102 	struct list_head rx_pend_q;
103 	struct list_head rx_free_q;
104 	spinlock_t ntb_rx_pend_q_lock;
105 	spinlock_t ntb_rx_free_q_lock;
106 	void *rx_buff_begin;
107 	void *rx_buff_end;
108 	void *rx_offset;
109 	unsigned int rx_max_frame;
110 
111 	void (*event_handler) (void *data, int status);
112 	struct delayed_work link_work;
113 	struct work_struct link_cleanup;
114 
115 	struct dentry *debugfs_dir;
116 	struct dentry *debugfs_stats;
117 
118 	/* Stats */
119 	u64 rx_bytes;
120 	u64 rx_pkts;
121 	u64 rx_ring_empty;
122 	u64 rx_err_no_buf;
123 	u64 rx_err_oflow;
124 	u64 rx_err_ver;
125 	u64 tx_bytes;
126 	u64 tx_pkts;
127 	u64 tx_ring_full;
128 };
129 
130 struct ntb_transport_mw {
131 	size_t size;
132 	void *virt_addr;
133 	dma_addr_t dma_addr;
134 };
135 
136 struct ntb_transport_client_dev {
137 	struct list_head entry;
138 	struct device dev;
139 };
140 
141 struct ntb_transport {
142 	struct list_head entry;
143 	struct list_head client_devs;
144 
145 	struct ntb_device *ndev;
146 	struct ntb_transport_mw mw[NTB_NUM_MW];
147 	struct ntb_transport_qp *qps;
148 	unsigned int max_qps;
149 	unsigned long qp_bitmap;
150 	bool transport_link;
151 	struct delayed_work link_work;
152 	struct work_struct link_cleanup;
153 	struct dentry *debugfs_dir;
154 };
155 
156 enum {
157 	DESC_DONE_FLAG = 1 << 0,
158 	LINK_DOWN_FLAG = 1 << 1,
159 };
160 
161 struct ntb_payload_header {
162 	u64 ver;
163 	unsigned int len;
164 	unsigned int flags;
165 };
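/*
 * Ring layout: each queue's share of a memory window is carved into
 * fixed-size frames of rx_max_frame/tx_max_frame bytes.  The payload
 * occupies the start of a frame and the ntb_payload_header sits at its
 * very end, e.g. (an illustrative sketch, not code from this driver):
 *
 *	void *frame = qp->rx_buff_begin + n * qp->rx_max_frame;
 *	struct ntb_payload_header *hdr = frame + qp->rx_max_frame -
 *				sizeof(struct ntb_payload_header);
 *
 * The receiver polls hdr->flags for DESC_DONE_FLAG, so the transmitter
 * must set the flags only after the payload copy has completed.
 */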
166 
167 enum {
168 	VERSION = 0,
169 	MW0_SZ,
170 	MW1_SZ,
171 	NUM_QPS,
172 	QP_LINKS,
173 	MAX_SPAD,
174 };
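/*
 * The scratchpad registers above carry the link-up handshake: each side
 * publishes its transport VERSION, memory window sizes, and queue count
 * to its peer, then reads the peer's values back and proceeds only when
 * they match its own (see ntb_transport_link_work()).  QP_LINKS holds a
 * bitmask of the queues whose local clients are ready.
 */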
175 
176 #define QP_TO_MW(qp)		((qp) % NTB_NUM_MW)
177 #define NTB_QP_DEF_NUM_ENTRIES	100
178 #define NTB_LINK_DOWN_TIMEOUT	10
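/*
 * QP_TO_MW() interleaves the queues across the memory windows.  For
 * example, with NTB_NUM_MW == 2 and four queues, qp0 and qp2 share MW0
 * while qp1 and qp3 share MW1, each receiving half of its window (see
 * ntb_transport_setup_qp_mw() and ntb_transport_init_queue()).
 */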
179 
180 static int ntb_match_bus(struct device *dev, struct device_driver *drv)
181 {
182 	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
183 }
184 
185 static int ntb_client_probe(struct device *dev)
186 {
187 	const struct ntb_client *drv = container_of(dev->driver,
188 						    struct ntb_client, driver);
189 	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
190 	int rc = -EINVAL;
191 
192 	get_device(dev);
193 	if (drv && drv->probe)
194 		rc = drv->probe(pdev);
195 	if (rc)
196 		put_device(dev);
197 
198 	return rc;
199 }
200 
201 static int ntb_client_remove(struct device *dev)
202 {
203 	const struct ntb_client *drv = container_of(dev->driver,
204 						    struct ntb_client, driver);
205 	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
206 
207 	if (drv && drv->remove)
208 		drv->remove(pdev);
209 
210 	put_device(dev);
211 
212 	return 0;
213 }
214 
215 static struct bus_type ntb_bus_type = {
216 	.name = "ntb_bus",
217 	.match = ntb_match_bus,
218 	.probe = ntb_client_probe,
219 	.remove = ntb_client_remove,
220 };
221 
222 static LIST_HEAD(ntb_transport_list);
223 
224 static int ntb_bus_init(struct ntb_transport *nt)
225 {
226 	if (list_empty(&ntb_transport_list)) {
227 		int rc = bus_register(&ntb_bus_type);
228 		if (rc)
229 			return rc;
230 	}
231 
232 	list_add(&nt->entry, &ntb_transport_list);
233 
234 	return 0;
235 }
236 
237 static void ntb_bus_remove(struct ntb_transport *nt)
238 {
239 	struct ntb_transport_client_dev *client_dev, *cd;
240 
241 	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
242 		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
243 			dev_name(&client_dev->dev));
244 		list_del(&client_dev->entry);
245 		device_unregister(&client_dev->dev);
246 	}
247 
248 	list_del(&nt->entry);
249 
250 	if (list_empty(&ntb_transport_list))
251 		bus_unregister(&ntb_bus_type);
252 }
253 
254 static void ntb_client_release(struct device *dev)
255 {
256 	struct ntb_transport_client_dev *client_dev;
257 	client_dev = container_of(dev, struct ntb_transport_client_dev, dev);
258 
259 	kfree(client_dev);
260 }
261 
262 /**
263  * ntb_unregister_client_dev - Unregister NTB client device
264  * @device_name: Name of NTB client device
265  *
266  * Unregister an NTB client device from the NTB transport layer
267  */
268 void ntb_unregister_client_dev(char *device_name)
269 {
270 	struct ntb_transport_client_dev *client, *cd;
271 	struct ntb_transport *nt;
272 
273 	list_for_each_entry(nt, &ntb_transport_list, entry)
274 		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
275 			if (!strncmp(dev_name(&client->dev), device_name,
276 				     strlen(device_name))) {
277 				list_del(&client->entry);
278 				device_unregister(&client->dev);
279 			}
280 }
281 EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);
282 
283 /**
284  * ntb_register_client_dev - Register NTB client device
285  * @device_name: Name of NTB client device
286  *
287  * Register an NTB client device with the NTB transport layer
288  */
289 int ntb_register_client_dev(char *device_name)
290 {
291 	struct ntb_transport_client_dev *client_dev;
292 	struct ntb_transport *nt;
293 	int rc;
294 
295 	if (list_empty(&ntb_transport_list))
296 		return -ENODEV;
297 
298 	list_for_each_entry(nt, &ntb_transport_list, entry) {
299 		struct device *dev;
300 
301 		client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
302 				     GFP_KERNEL);
303 		if (!client_dev) {
304 			rc = -ENOMEM;
305 			goto err;
306 		}
307 
308 		dev = &client_dev->dev;
309 
310 		/* setup and register client devices */
311 		dev_set_name(dev, "%s", device_name);
312 		dev->bus = &ntb_bus_type;
313 		dev->release = ntb_client_release;
314 		dev->parent = &ntb_query_pdev(nt->ndev)->dev;
315 
316 		rc = device_register(dev);
317 		if (rc) {
318 			kfree(client_dev);
319 			goto err;
320 		}
321 
322 		list_add_tail(&client_dev->entry, &nt->client_devs);
323 	}
324 
325 	return 0;
326 
327 err:
328 	ntb_unregister_client_dev(device_name);
329 
330 	return rc;
331 }
332 EXPORT_SYMBOL_GPL(ntb_register_client_dev);
333 
334 /**
335  * ntb_register_client - Register NTB client driver
336  * @drv: NTB client driver to be registered
337  *
338  * Register an NTB client driver with the NTB transport layer
339  *
340  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
341  */
342 int ntb_register_client(struct ntb_client *drv)
343 {
344 	drv->driver.bus = &ntb_bus_type;
345 
346 	if (list_empty(&ntb_transport_list))
347 		return -ENODEV;
348 
349 	return driver_register(&drv->driver);
350 }
351 EXPORT_SYMBOL_GPL(ntb_register_client);
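/*
 * A minimal client sketch (the "example" names are placeholders, not
 * part of this driver; error handling trimmed):
 *
 *	static struct ntb_client example_client = {
 *		.driver	= { .name = "example" },
 *		.probe	= example_probe,
 *		.remove	= example_remove,
 *	};
 *
 *	rc = ntb_register_client_dev("example");
 *	if (rc)
 *		return rc;
 *	return ntb_register_client(&example_client);
 *
 * Both calls fail with -ENODEV until a transport exists, i.e. until
 * ntb_transport_init() has run; ntb_match_bus() then pairs the driver
 * with the device by name.
 */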
352 
353 /**
354  * ntb_unregister_client - Unregister NTB client driver
355  * @drv: NTB client driver to be unregistered
356  *
357  * Unregister an NTB client driver from the NTB transport layer
360  */
361 void ntb_unregister_client(struct ntb_client *drv)
362 {
363 	driver_unregister(&drv->driver);
364 }
365 EXPORT_SYMBOL_GPL(ntb_unregister_client);
366 
367 static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
368 			    loff_t *offp)
369 {
370 	struct ntb_transport_qp *qp;
371 	char *buf;
372 	ssize_t ret, out_offset, out_count;
373 
374 	out_count = 600;
375 
376 	buf = kmalloc(out_count, GFP_KERNEL);
377 	if (!buf)
378 		return -ENOMEM;
379 
380 	qp = filp->private_data;
381 	out_offset = 0;
382 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
383 			       "NTB QP stats\n");
384 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
385 			       "rx_bytes - \t%llu\n", qp->rx_bytes);
386 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
387 			       "rx_pkts - \t%llu\n", qp->rx_pkts);
388 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
389 			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
390 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
391 			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
392 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
393 			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
394 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
395 			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
396 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
397 			       "rx_buff_begin - %p\n", qp->rx_buff_begin);
398 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
399 			       "rx_offset - \t%p\n", qp->rx_offset);
400 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
401 			       "rx_buff_end - \t%p\n", qp->rx_buff_end);
402 
403 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
404 			       "tx_bytes - \t%llu\n", qp->tx_bytes);
405 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
406 			       "tx_pkts - \t%llu\n", qp->tx_pkts);
407 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
408 			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
409 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
410 			       "tx_mw_begin - \t%p\n", qp->tx_mw_begin);
411 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
412 			       "tx_offset - \t%p\n", qp->tx_offset);
413 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
414 			       "tx_mw_end - \t%p\n", qp->tx_mw_end);
415 
416 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
417 			       "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
418 			       "Up" : "Down");
419 	if (out_offset > out_count)
420 		out_offset = out_count;
421 
422 	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
423 	kfree(buf);
424 	return ret;
425 }
426 
427 static const struct file_operations ntb_qp_debugfs_stats = {
428 	.owner = THIS_MODULE,
429 	.open = simple_open,
430 	.read = debugfs_read,
431 };
432 
433 static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
434 			 struct list_head *list)
435 {
436 	unsigned long flags;
437 
438 	spin_lock_irqsave(lock, flags);
439 	list_add_tail(entry, list);
440 	spin_unlock_irqrestore(lock, flags);
441 }
442 
443 static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
444 						struct list_head *list)
445 {
446 	struct ntb_queue_entry *entry;
447 	unsigned long flags;
448 
449 	spin_lock_irqsave(lock, flags);
450 	if (list_empty(list)) {
451 		entry = NULL;
452 		goto out;
453 	}
454 	entry = list_first_entry(list, struct ntb_queue_entry, entry);
455 	list_del(&entry->entry);
456 out:
457 	spin_unlock_irqrestore(lock, flags);
458 
459 	return entry;
460 }
461 
462 static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
463 				      unsigned int qp_num)
464 {
465 	struct ntb_transport_qp *qp = &nt->qps[qp_num];
466 	unsigned int rx_size, num_qps_mw;
467 	u8 mw_num = QP_TO_MW(qp_num);
468 	void *offset;
469 
470 	WARN_ON(!nt->mw[mw_num].virt_addr);
471 
472 	if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
473 		num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
474 	else
475 		num_qps_mw = nt->max_qps / NTB_NUM_MW;
476 
477 	rx_size = nt->mw[mw_num].size / num_qps_mw;
478 	qp->rx_buff_begin = nt->mw[mw_num].virt_addr +
479 			    (qp_num / NTB_NUM_MW * rx_size);
480 	qp->rx_buff_end = qp->rx_buff_begin + rx_size;
481 	qp->rx_offset = qp->rx_buff_begin;
482 	qp->rx_max_frame = min(transport_mtu, rx_size);
483 
484 	/* setup the hdr offsets with 0's */
485 	for (offset = qp->rx_buff_begin + qp->rx_max_frame -
486 		      sizeof(struct ntb_payload_header);
487 	     offset < qp->rx_buff_end; offset += qp->rx_max_frame)
488 		memset(offset, 0, sizeof(struct ntb_payload_header));
489 
490 	qp->rx_pkts = 0;
491 	qp->tx_pkts = 0;
492 }
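/*
 * Sizing example for the code above: a 1 MB window shared by two queues
 * gives rx_size = 512 KB per queue; with transport_mtu at its default
 * of 0x401E (16414) bytes, rx_max_frame = min(16414, 524288) = 16414,
 * so each queue holds 524288 / 16414 = 31 complete receive frames.
 */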
493 
494 static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
495 {
496 	struct ntb_transport_mw *mw = &nt->mw[num_mw];
497 	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
498 
499 	/* Alloc memory for receiving data.  Must be 4k aligned */
500 	mw->size = ALIGN(size, 4096);
501 
502 	mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
503 					   GFP_KERNEL);
504 	if (!mw->virt_addr) {
505 		dev_err(&pdev->dev, "Unable to allocate MW buffer of size %zu\n",
506 			mw->size);
507 		return -ENOMEM;
508 	}
509 
510 	/* Notify HW the memory location of the receive buffer */
511 	ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);
512 
513 	return 0;
514 }
515 
516 static void ntb_qp_link_cleanup(struct work_struct *work)
517 {
518 	struct ntb_transport_qp *qp = container_of(work,
519 						   struct ntb_transport_qp,
520 						   link_cleanup);
521 	struct ntb_transport *nt = qp->transport;
522 	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
523 
524 	if (qp->qp_link == NTB_LINK_DOWN) {
525 		cancel_delayed_work_sync(&qp->link_work);
526 		return;
527 	}
528 
529 	if (qp->event_handler)
530 		qp->event_handler(qp->cb_data, NTB_LINK_DOWN);
531 
532 	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
533 	qp->qp_link = NTB_LINK_DOWN;
534 
535 	if (nt->transport_link == NTB_LINK_UP)
536 		schedule_delayed_work(&qp->link_work,
537 				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
538 }
539 
540 static void ntb_qp_link_down(struct ntb_transport_qp *qp)
541 {
542 	schedule_work(&qp->link_cleanup);
543 }
544 
545 static void ntb_transport_link_cleanup(struct work_struct *work)
546 {
547 	struct ntb_transport *nt = container_of(work, struct ntb_transport,
548 						link_cleanup);
549 	int i;
550 
551 	if (nt->transport_link == NTB_LINK_DOWN)
552 		cancel_delayed_work_sync(&nt->link_work);
553 	else
554 		nt->transport_link = NTB_LINK_DOWN;
555 
556 	/* Pass along the info to any clients */
557 	for (i = 0; i < nt->max_qps; i++)
558 		if (!test_bit(i, &nt->qp_bitmap))
559 			ntb_qp_link_down(&nt->qps[i]);
560 
561 	/* The scratchpad registers keep the values if the remote side
562 	 * goes down, blast them now to give them a sane value the next
563 	 * time they are accessed
564 	 */
565 	for (i = 0; i < MAX_SPAD; i++)
566 		ntb_write_local_spad(nt->ndev, i, 0);
567 }
568 
569 static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
570 {
571 	struct ntb_transport *nt = data;
572 
573 	switch (event) {
574 	case NTB_EVENT_HW_LINK_UP:
575 		schedule_delayed_work(&nt->link_work, 0);
576 		break;
577 	case NTB_EVENT_HW_LINK_DOWN:
578 		schedule_work(&nt->link_cleanup);
579 		break;
580 	default:
581 		BUG();
582 	}
583 }
584 
585 static void ntb_transport_link_work(struct work_struct *work)
586 {
587 	struct ntb_transport *nt = container_of(work, struct ntb_transport,
588 						link_work.work);
589 	struct ntb_device *ndev = nt->ndev;
590 	struct pci_dev *pdev = ntb_query_pdev(ndev);
591 	u32 val;
592 	int rc, i;
593 
594 	/* send the local info */
595 	rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
596 	if (rc) {
597 		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
598 			NTB_TRANSPORT_VERSION, VERSION);
599 		goto out;
600 	}
601 
602 	rc = ntb_write_remote_spad(ndev, MW0_SZ, ntb_get_mw_size(ndev, 0));
603 	if (rc) {
604 		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
605 			(u32) ntb_get_mw_size(ndev, 0), MW0_SZ);
606 		goto out;
607 	}
608 
609 	rc = ntb_write_remote_spad(ndev, MW1_SZ, ntb_get_mw_size(ndev, 1));
610 	if (rc) {
611 		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
612 			(u32) ntb_get_mw_size(ndev, 1), MW1_SZ);
613 		goto out;
614 	}
615 
616 	rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
617 	if (rc) {
618 		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
619 			nt->max_qps, NUM_QPS);
620 		goto out;
621 	}
622 
623 	rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
624 	if (rc) {
625 		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
626 		goto out;
627 	}
628 
629 	rc = ntb_write_remote_spad(ndev, QP_LINKS, val);
630 	if (rc) {
631 		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
632 			val, QP_LINKS);
633 		goto out;
634 	}
635 
636 	/* Query the remote side for its info */
637 	rc = ntb_read_remote_spad(ndev, VERSION, &val);
638 	if (rc) {
639 		dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
640 		goto out;
641 	}
642 
643 	if (val != NTB_TRANSPORT_VERSION)
644 		goto out;
645 	dev_dbg(&pdev->dev, "Remote version = %d\n", val);
646 
647 	rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
648 	if (rc) {
649 		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
650 		goto out;
651 	}
652 
653 	if (val != nt->max_qps)
654 		goto out;
655 	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
656 
657 	rc = ntb_read_remote_spad(ndev, MW0_SZ, &val);
658 	if (rc) {
659 		dev_err(&pdev->dev, "Error reading remote spad %d\n", MW0_SZ);
660 		goto out;
661 	}
662 
663 	if (!val)
664 		goto out;
665 	dev_dbg(&pdev->dev, "Remote MW0 size = %d\n", val);
666 
667 	rc = ntb_set_mw(nt, 0, val);
668 	if (rc)
669 		goto out;
670 
671 	rc = ntb_read_remote_spad(ndev, MW1_SZ, &val);
672 	if (rc) {
673 		dev_err(&pdev->dev, "Error reading remote spad %d\n", MW1_SZ);
674 		goto out;
675 	}
676 
677 	if (!val)
678 		goto out;
679 	dev_dbg(&pdev->dev, "Remote MW1 size = %d\n", val);
680 
681 	rc = ntb_set_mw(nt, 1, val);
682 	if (rc)
683 		goto out;
684 
685 	nt->transport_link = NTB_LINK_UP;
686 
687 	for (i = 0; i < nt->max_qps; i++) {
688 		struct ntb_transport_qp *qp = &nt->qps[i];
689 
690 		ntb_transport_setup_qp_mw(nt, i);
691 
692 		if (qp->client_ready == NTB_LINK_UP)
693 			schedule_delayed_work(&qp->link_work, 0);
694 	}
695 
696 	return;
697 
698 out:
699 	if (ntb_hw_link_status(ndev))
700 		schedule_delayed_work(&nt->link_work,
701 				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
702 }
703 
704 static void ntb_qp_link_work(struct work_struct *work)
705 {
706 	struct ntb_transport_qp *qp = container_of(work,
707 						   struct ntb_transport_qp,
708 						   link_work.work);
709 	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
710 	struct ntb_transport *nt = qp->transport;
711 	int rc, val;
712 
713 	WARN_ON(nt->transport_link != NTB_LINK_UP);
714 
715 	rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
716 	if (rc) {
717 		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
718 		return;
719 	}
720 
721 	rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
722 	if (rc)
723 		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
724 			val | 1 << qp->qp_num, QP_LINKS);
725 
726 	/* query remote spad for qp ready bits */
727 	rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
728 	if (rc)
729 		dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);
730 
731 	dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);
732 
733 	/* See if the remote side is up */
734 	if (1 << qp->qp_num & val) {
735 		qp->qp_link = NTB_LINK_UP;
736 
737 		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
738 		if (qp->event_handler)
739 			qp->event_handler(qp->cb_data, NTB_LINK_UP);
740 	} else if (nt->transport_link == NTB_LINK_UP)
741 		schedule_delayed_work(&qp->link_work,
742 				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
743 }
744 
745 static void ntb_transport_init_queue(struct ntb_transport *nt,
746 				     unsigned int qp_num)
747 {
748 	struct ntb_transport_qp *qp;
749 	unsigned int num_qps_mw, tx_size;
750 	u8 mw_num = QP_TO_MW(qp_num);
751 
752 	qp = &nt->qps[qp_num];
753 	qp->qp_num = qp_num;
754 	qp->transport = nt;
755 	qp->ndev = nt->ndev;
756 	qp->qp_link = NTB_LINK_DOWN;
757 	qp->client_ready = NTB_LINK_DOWN;
758 	qp->event_handler = NULL;
759 
760 	if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
761 		num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
762 	else
763 		num_qps_mw = nt->max_qps / NTB_NUM_MW;
764 
765 	tx_size = ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
766 	qp->tx_mw_begin = ntb_get_mw_vbase(nt->ndev, mw_num) +
767 			  (qp_num / NTB_NUM_MW * tx_size);
768 	qp->tx_mw_end = qp->tx_mw_begin + tx_size;
769 	qp->tx_offset = qp->tx_mw_begin;
770 	qp->tx_max_frame = min(transport_mtu, tx_size);
771 
772 	if (nt->debugfs_dir) {
773 		char debugfs_name[8];
774 
775 		snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
776 		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
777 						     nt->debugfs_dir);
778 
779 		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
780 							qp->debugfs_dir, qp,
781 							&ntb_qp_debugfs_stats);
782 	}
783 
784 	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
785 	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup);
786 
787 	spin_lock_init(&qp->ntb_rx_pend_q_lock);
788 	spin_lock_init(&qp->ntb_rx_free_q_lock);
789 	spin_lock_init(&qp->ntb_tx_free_q_lock);
790 
791 	INIT_LIST_HEAD(&qp->rx_pend_q);
792 	INIT_LIST_HEAD(&qp->rx_free_q);
793 	INIT_LIST_HEAD(&qp->tx_free_q);
794 }
795 
796 int ntb_transport_init(struct pci_dev *pdev)
797 {
798 	struct ntb_transport *nt;
799 	int rc, i;
800 
801 	nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
802 	if (!nt)
803 		return -ENOMEM;
804 
805 	if (debugfs_initialized())
806 		nt->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
807 	else
808 		nt->debugfs_dir = NULL;
809 
810 	nt->ndev = ntb_register_transport(pdev, nt);
811 	if (!nt->ndev) {
812 		rc = -EIO;
813 		goto err;
814 	}
815 
816 	nt->max_qps = min(nt->ndev->max_cbs, max_num_clients);
817 
818 	nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
819 			  GFP_KERNEL);
820 	if (!nt->qps) {
821 		rc = -ENOMEM;
822 		goto err1;
823 	}
824 
825 	nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;
826 
827 	for (i = 0; i < nt->max_qps; i++)
828 		ntb_transport_init_queue(nt, i);
829 
830 	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
831 	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup);
832 
833 	rc = ntb_register_event_callback(nt->ndev,
834 					 ntb_transport_event_callback);
835 	if (rc)
836 		goto err2;
837 
838 	INIT_LIST_HEAD(&nt->client_devs);
839 	rc = ntb_bus_init(nt);
840 	if (rc)
841 		goto err3;
842 
843 	if (ntb_hw_link_status(nt->ndev))
844 		schedule_delayed_work(&nt->link_work, 0);
845 
846 	return 0;
847 
848 err3:
849 	ntb_unregister_event_callback(nt->ndev);
850 err2:
851 	kfree(nt->qps);
852 err1:
853 	ntb_unregister_transport(nt->ndev);
854 err:
855 	debugfs_remove_recursive(nt->debugfs_dir);
856 	kfree(nt);
857 	return rc;
858 }
859 
860 void ntb_transport_free(void *transport)
861 {
862 	struct ntb_transport *nt = transport;
863 	struct pci_dev *pdev;
864 	int i;
865 
866 	nt->transport_link = NTB_LINK_DOWN;
867 
868 	/* verify that all the qp's are freed */
869 	for (i = 0; i < nt->max_qps; i++)
870 		if (!test_bit(i, &nt->qp_bitmap))
871 			ntb_transport_free_queue(&nt->qps[i]);
872 
873 	ntb_bus_remove(nt);
874 
875 	cancel_delayed_work_sync(&nt->link_work);
876 
877 	debugfs_remove_recursive(nt->debugfs_dir);
878 
879 	ntb_unregister_event_callback(nt->ndev);
880 
881 	pdev = ntb_query_pdev(nt->ndev);
882 
883 	for (i = 0; i < NTB_NUM_MW; i++)
884 		if (nt->mw[i].virt_addr)
885 			dma_free_coherent(&pdev->dev, nt->mw[i].size,
886 					  nt->mw[i].virt_addr,
887 					  nt->mw[i].dma_addr);
888 
889 	kfree(nt->qps);
890 	ntb_unregister_transport(nt->ndev);
891 	kfree(nt);
892 }
893 
894 static void ntb_rx_copy_task(struct ntb_transport_qp *qp,
895 			     struct ntb_queue_entry *entry, void *offset)
896 {
898 	struct ntb_payload_header *hdr;
899 
900 	BUG_ON(offset < qp->rx_buff_begin ||
901 	       offset + qp->rx_max_frame >= qp->rx_buff_end);
902 
903 	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
904 	entry->len = hdr->len;
905 
906 	memcpy(entry->buf, offset, entry->len);
907 
908 	/* Ensure that the data is fully copied out before clearing the flag */
909 	wmb();
910 	hdr->flags = 0;
911 
912 	if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
913 		qp->rx_handler(qp, qp->cb_data, entry->cb_data, entry->len);
914 
915 	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
916 }
917 
918 static int ntb_process_rxc(struct ntb_transport_qp *qp)
919 {
920 	struct ntb_payload_header *hdr;
921 	struct ntb_queue_entry *entry;
922 	void *offset;
923 
924 	/* Compute the offset and header up front so the header can be
925 	 * reported even when no receive buffer has been posted */
926 	offset = qp->rx_offset;
927 	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
928 
929 	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
930 	if (!entry) {
931 		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
932 			"no buffer - HDR ver %llu, len %d, flags %x\n",
933 			hdr->ver, hdr->len, hdr->flags);
934 		qp->rx_err_no_buf++;
935 		return -ENOMEM;
936 	}
937 
938 	if (!(hdr->flags & DESC_DONE_FLAG)) {
939 		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
940 			     &qp->rx_pend_q);
941 		qp->rx_ring_empty++;
942 		return -EAGAIN;
943 	}
944 
945 	if (hdr->ver != qp->rx_pkts) {
946 		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
947 			"qp %d: version mismatch, expected %llu - got %llu\n",
948 			qp->qp_num, qp->rx_pkts, hdr->ver);
949 		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
950 			     &qp->rx_pend_q);
951 		qp->rx_err_ver++;
952 		return -EIO;
953 	}
954 
955 	if (hdr->flags & LINK_DOWN_FLAG) {
956 		ntb_qp_link_down(qp);
957 
958 		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
959 			     &qp->rx_pend_q);
960 
961 		/* Ensure that the data is fully copied out before clearing the
962 		 * done flag
963 		 */
964 		wmb();
965 		hdr->flags = 0;
966 		goto out;
967 	}
968 
969 	dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
970 		"rx offset %p, ver %llu - %d payload received, buf size %d\n",
971 		qp->rx_offset, hdr->ver, hdr->len, entry->len);
972 
973 	if (hdr->len <= entry->len)
974 		ntb_rx_copy_task(qp, entry, offset);
975 	else {
976 		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
977 			     &qp->rx_pend_q);
978 
979 		/* Ensure that the data is fully copied out before clearing the
980 		 * done flag
981 		 */
982 		wmb();
983 		hdr->flags = 0;
984 		qp->rx_err_oflow++;
985 		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
986 			"RX overflow! Wanted %d got %d\n",
987 			hdr->len, entry->len);
988 	}
989 
990 	qp->rx_bytes += hdr->len;
991 	qp->rx_pkts++;
992 
993 out:
994 	qp->rx_offset += qp->rx_max_frame;
995 	if (qp->rx_offset + qp->rx_max_frame >= qp->rx_buff_end)
996 		qp->rx_offset = qp->rx_buff_begin;
997 
998 	return 0;
999 }
1000 
1001 static void ntb_transport_rx(unsigned long data)
1002 {
1003 	struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
1004 	int rc;
1005 
1006 	do {
1007 		rc = ntb_process_rxc(qp);
1008 	} while (!rc);
1009 }
1010 
1011 static void ntb_transport_rxc_db(void *data, int db_num)
1012 {
1013 	struct ntb_transport_qp *qp = data;
1014 
1015 	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
1016 		__func__, db_num);
1017 
1018 	tasklet_schedule(&qp->rx_work);
1019 }
1020 
1021 static void ntb_tx_copy_task(struct ntb_transport_qp *qp,
1022 			     struct ntb_queue_entry *entry,
1023 			     void *offset)
1024 {
1025 	struct ntb_payload_header *hdr;
1026 
1027 	BUG_ON(offset < qp->tx_mw_begin ||
1028 	       offset + qp->tx_max_frame >= qp->tx_mw_end);
1029 
1030 	memcpy_toio(offset, entry->buf, entry->len);
1031 
1032 	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
1033 	hdr->len = entry->len;
1034 	hdr->ver = qp->tx_pkts;
1035 
1036 	/* Ensure that the data is fully copied out before setting the flag */
1037 	wmb();
1038 	hdr->flags = entry->flags | DESC_DONE_FLAG;
1039 
1040 	ntb_ring_sdb(qp->ndev, qp->qp_num);
1041 
1042 	/* The entry length can only be zero if the packet is intended to be a
1043 	 * "link down" or similar.  Since no payload is being sent in these
1044 	 * cases, there is nothing to add to the completion queue.
1045 	 */
1046 	if (entry->len > 0) {
1047 		qp->tx_bytes += entry->len;
1048 
1049 		if (qp->tx_handler)
1050 			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
1051 				       entry->len);
1052 	}
1053 
1054 	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
1055 }
1056 
1057 static int ntb_process_tx(struct ntb_transport_qp *qp,
1058 			  struct ntb_queue_entry *entry)
1059 {
1060 	struct ntb_payload_header *hdr;
1061 	void *offset;
1062 
1063 	offset = qp->tx_offset;
1064 	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
1065 
1066 	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - offset %p, tx %p, entry len %d flags %x buff %p\n",
1067 		qp->tx_pkts, offset, qp->tx_offset, entry->len, entry->flags,
1068 		entry->buf);
1069 	if (hdr->flags) {
1070 		qp->tx_ring_full++;
1071 		return -EAGAIN;
1072 	}
1073 
1074 	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
1075 		if (qp->tx_handler)
1076 			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);
1077 
1078 		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
1079 			     &qp->tx_free_q);
1080 		return 0;
1081 	}
1082 
1083 	ntb_tx_copy_task(qp, entry, offset);
1084 
1085 	qp->tx_offset += qp->tx_max_frame;
1086 	if (qp->tx_offset + qp->tx_max_frame >= qp->tx_mw_end)
1087 		qp->tx_offset = qp->tx_mw_begin;
1088 
1089 	qp->tx_pkts++;
1090 
1091 	return 0;
1092 }
1093 
1094 static void ntb_send_link_down(struct ntb_transport_qp *qp)
1095 {
1096 	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
1097 	struct ntb_queue_entry *entry;
1098 	int i, rc;
1099 
1100 	if (qp->qp_link == NTB_LINK_DOWN)
1101 		return;
1102 
1103 	qp->qp_link = NTB_LINK_DOWN;
1104 	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
1105 
1106 	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
1107 		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
1108 		if (entry)
1109 			break;
1110 		msleep(100);
1111 	}
1112 
1113 	if (!entry)
1114 		return;
1115 
1116 	entry->cb_data = NULL;
1117 	entry->buf = NULL;
1118 	entry->len = 0;
1119 	entry->flags = LINK_DOWN_FLAG;
1120 
1121 	rc = ntb_process_tx(qp, entry);
1122 	if (rc)
1123 		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
1124 			qp->qp_num);
1125 }
1126 
1127 /**
1128  * ntb_transport_create_queue - Create a new NTB transport layer queue
1129  * @data: pointer that will be handed back in the callback routines
1130  * @pdev: PCI device on which the NTB transport was initialized
1131  * @handlers: receive, transmit, and event callback functions
1132  *
1133  * Create a new NTB transport layer queue and provide the queue with callback
1134  * routines for both transmit and receive.  The receive callback routine will be
1135  * used to pass up data when the transport has received it on the queue.  The
1136  * transmit callback routine will be called when the transport has completed the
1137  * transmission of the data on the queue and the data is ready to be freed.
1138  *
1139  * RETURNS: pointer to newly created ntb_queue, NULL on error.
1140  */
1141 struct ntb_transport_qp *
1142 ntb_transport_create_queue(void *data, struct pci_dev *pdev,
1143 			   const struct ntb_queue_handlers *handlers)
1144 {
1145 	struct ntb_queue_entry *entry;
1146 	struct ntb_transport_qp *qp;
1147 	struct ntb_transport *nt;
1148 	unsigned int free_queue;
1149 	int rc, i;
1150 
1151 	nt = ntb_find_transport(pdev);
1152 	if (!nt)
1153 		goto err;
1154 
1155 	free_queue = ffs(nt->qp_bitmap);
1156 	if (!free_queue)
1157 		goto err;
1158 
1159 	/* decrement free_queue to make it zero based */
1160 	free_queue--;
1161 
1162 	clear_bit(free_queue, &nt->qp_bitmap);
1163 
1164 	qp = &nt->qps[free_queue];
1165 	qp->cb_data = data;
1166 	qp->rx_handler = handlers->rx_handler;
1167 	qp->tx_handler = handlers->tx_handler;
1168 	qp->event_handler = handlers->event_handler;
1169 
1170 	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
1171 		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
1172 		if (!entry)
1173 			goto err1;
1174 
1175 		ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
1176 			     &qp->rx_free_q);
1177 	}
1178 
1179 	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
1180 		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
1181 		if (!entry)
1182 			goto err2;
1183 
1184 		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
1185 			     &qp->tx_free_q);
1186 	}
1187 
1188 	tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);
1189 
1190 	rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
1191 				      ntb_transport_rxc_db);
1192 	if (rc)
1193 		goto err3;
1194 
1195 	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
1196 
1197 	return qp;
1198 
1199 err3:
1200 	tasklet_disable(&qp->rx_work);
1201 err2:
1202 	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
1203 		kfree(entry);
1204 err1:
1205 	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
1206 		kfree(entry);
1207 	set_bit(free_queue, &nt->qp_bitmap);
1208 err:
1209 	return NULL;
1210 }
1211 EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
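/*
 * Typical usage, as a hypothetical sketch (the handler names are
 * placeholders, not part of this driver):
 *
 *	static const struct ntb_queue_handlers example_handlers = {
 *		.rx_handler	= example_rx_handler,
 *		.tx_handler	= example_tx_handler,
 *		.event_handler	= example_event_handler,
 *	};
 *
 *	qp = ntb_transport_create_queue(priv, pdev, &example_handlers);
 *	if (!qp)
 *		return -EIO;
 */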
1212 
1213 /**
1214  * ntb_transport_free_queue - Frees NTB transport queue
1215  * @qp: NTB queue to be freed
1216  *
1217  * Frees NTB transport queue
1218  */
1219 void ntb_transport_free_queue(struct ntb_transport_qp *qp)
1220 {
1221 	struct pci_dev *pdev;
1222 	struct ntb_queue_entry *entry;
1223 
1224 	if (!qp)
1225 		return;
1226 	pdev = ntb_query_pdev(qp->ndev);
1227 	cancel_delayed_work_sync(&qp->link_work);
1228 
1229 	ntb_unregister_db_callback(qp->ndev, qp->qp_num);
1230 	tasklet_disable(&qp->rx_work);
1231 
1232 	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
1233 		kfree(entry);
1234 
1235 	while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
1236 		dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
1237 		kfree(entry);
1238 	}
1239 
1240 	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
1241 		kfree(entry);
1242 
1243 	set_bit(qp->qp_num, &qp->transport->qp_bitmap);
1244 
1245 	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
1246 }
1247 EXPORT_SYMBOL_GPL(ntb_transport_free_queue);
1248 
1249 /**
1250  * ntb_transport_rx_remove - Dequeue an enqueued rx buffer
1251  * @qp: NTB transport layer queue from which the buffer is removed
1252  * @len: pointer to a variable into which the buffer's length is written
1253  *
1254  * Dequeues unused buffers from the receive queue.  Should only be used during
1255  * shutdown of the qp.
1256  *
1257  * RETURNS: the buffer's callback data on success, or NULL on error.
1258  */
1259 void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
1260 {
1261 	struct ntb_queue_entry *entry;
1262 	void *buf;
1263 
1264 	if (!qp || qp->client_ready == NTB_LINK_UP)
1265 		return NULL;
1266 
1267 	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
1268 	if (!entry)
1269 		return NULL;
1270 
1271 	buf = entry->cb_data;
1272 	*len = entry->len;
1273 
1274 	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
1275 
1276 	return buf;
1277 }
1278 EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
1279 
1280 /**
1281  * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
1282  * @qp: NTB transport layer queue the entry is to be enqueued on
1283  * @cb: per buffer pointer for callback function to use
1284  * @data: pointer to data buffer that incoming packets will be copied into
1285  * @len: length of the data buffer
1286  *
1287  * Enqueue a new receive buffer onto the transport queue into which a NTB
1288  * Enqueue a new receive buffer onto the transport queue, into which an NTB
1289  * payload can be received.
1290  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
1291  */
1292 int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
1293 			     unsigned int len)
1294 {
1295 	struct ntb_queue_entry *entry;
1296 
1297 	if (!qp)
1298 		return -EINVAL;
1299 
1300 	entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
1301 	if (!entry)
1302 		return -ENOMEM;
1303 
1304 	entry->cb_data = cb;
1305 	entry->buf = data;
1306 	entry->len = len;
1307 
1308 	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
1309 
1310 	return 0;
1311 }
1312 EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
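/*
 * A client would typically pre-post a pool of receive buffers before
 * bringing its link up, along these lines (illustrative only; NUM_BUFS
 * and the skb handling are hypothetical):
 *
 *	for (i = 0; i < NUM_BUFS; i++) {
 *		skb = netdev_alloc_skb(ndev, mtu);
 *		if (!skb)
 *			break;
 *		rc = ntb_transport_rx_enqueue(qp, skb, skb->data, mtu);
 *		if (rc)
 *			break;
 *	}
 *
 * The cb pointer (the skb here) is handed back to the rx_handler once a
 * payload has been copied into the buffer.
 */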
1313 
1314 /**
1315  * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
1316  * @qp: NTB transport layer queue the entry is to be enqueued on
1317  * @cb: per buffer pointer for callback function to use
1318  * @data: pointer to data buffer that will be sent
1319  * @len: length of the data buffer
1320  *
1321  * Enqueue a new transmit buffer onto the transport queue from which an NTB
1322  * payload will be transmitted.  This assumes that a lock is being held to
1323  * serialize access to the qp.
1324  *
1325  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
1326  */
1327 int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
1328 			     unsigned int len)
1329 {
1330 	struct ntb_queue_entry *entry;
1331 	int rc;
1332 
1333 	if (!qp || qp->qp_link != NTB_LINK_UP || !len)
1334 		return -EINVAL;
1335 
1336 	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
1337 	if (!entry)
1338 		return -ENOMEM;
1339 
1340 	entry->cb_data = cb;
1341 	entry->buf = data;
1342 	entry->len = len;
1343 	entry->flags = 0;
1344 
1345 	rc = ntb_process_tx(qp, entry);
1346 	if (rc)
1347 		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
1348 			     &qp->tx_free_q);
1349 
1350 	return rc;
1351 }
1352 EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
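/*
 * A transmit path sketch (illustrative; the caller must serialize
 * access to the qp, as noted above):
 *
 *	if (len > ntb_transport_max_size(qp))
 *		return -EINVAL;
 *	rc = ntb_transport_tx_enqueue(qp, skb, skb->data, len);
 *	if (rc)
 *		stop_queue();
 *
 * A nonzero rc is -ENOMEM (no free entries) or -EAGAIN (ring full).
 * The cb pointer is handed back to the tx_handler once the payload has
 * been copied into the memory window and the doorbell rung.
 */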
1353 
1354 /**
1355  * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
1356  * @qp: NTB transport layer queue to be enabled
1357  *
1358  * Notify NTB transport layer of client readiness to use queue
1359  */
1360 void ntb_transport_link_up(struct ntb_transport_qp *qp)
1361 {
1362 	if (!qp)
1363 		return;
1364 
1365 	qp->client_ready = NTB_LINK_UP;
1366 
1367 	if (qp->transport->transport_link == NTB_LINK_UP)
1368 		schedule_delayed_work(&qp->link_work, 0);
1369 }
1370 EXPORT_SYMBOL_GPL(ntb_transport_link_up);
1371 
1372 /**
1373  * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
1374  * @qp: NTB transport layer queue to be disabled
1375  *
1376  * Notify NTB transport layer of client's desire to no longer receive data on
1377  * transport queue specified.  It is the client's responsibility to ensure all
1378  * entries on the queue are purged or otherwise handled appropriately.
1379  */
1380 void ntb_transport_link_down(struct ntb_transport_qp *qp)
1381 {
1382 	struct pci_dev *pdev;
1383 	int rc, val;
1384 
1385 	if (!qp)
1386 		return;
1387 	pdev = ntb_query_pdev(qp->ndev);
1388 	qp->client_ready = NTB_LINK_DOWN;
1389 
1390 	rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
1391 	if (rc) {
1392 		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
1393 		return;
1394 	}
1395 
1396 	rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
1397 				   val & ~(1 << qp->qp_num));
1398 	if (rc)
1399 		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
1400 			val & ~(1 << qp->qp_num), QP_LINKS);
1401 
1402 	if (qp->qp_link == NTB_LINK_UP)
1403 		ntb_send_link_down(qp);
1404 	else
1405 		cancel_delayed_work_sync(&qp->link_work);
1406 }
1407 EXPORT_SYMBOL_GPL(ntb_transport_link_down);
1408 
1409 /**
1410  * ntb_transport_link_query - Query transport link state
1411  * @qp: NTB transport layer queue to be queried
1412  *
1413  * Query connectivity to the remote system of the NTB transport queue
1414  *
1415  * RETURNS: true for link up or false for link down
1416  */
1417 bool ntb_transport_link_query(struct ntb_transport_qp *qp)
1418 {
1419 	return qp->qp_link == NTB_LINK_UP;
1420 }
1421 EXPORT_SYMBOL_GPL(ntb_transport_link_query);
1422 
1423 /**
1424  * ntb_transport_qp_num - Query the qp number
1425  * @qp: NTB transport layer queue to be queried
1426  *
1427  * Query qp number of the NTB transport queue
1428  *
1429  * RETURNS: a zero-based number specifying the qp number
1430  */
1431 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
1432 {
1433 	return qp->qp_num;
1434 }
1435 EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
1436 
1437 /**
1438  * ntb_transport_max_size - Query the max payload size of a qp
1439  * @qp: NTB transport layer queue to be queried
1440  *
1441  * Query the maximum payload size permissible on the given qp
1442  *
1443  * RETURNS: the max payload size of a qp
1444  */
1445 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
1446 {
1447 	return qp->tx_max_frame - sizeof(struct ntb_payload_header);
1448 }
1449 EXPORT_SYMBOL_GPL(ntb_transport_max_size);
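/*
 * With the default transport_mtu of 0x401E and a window large enough
 * that the window share does not clamp the frame, this evaluates to
 * 0x401E - sizeof(struct ntb_payload_header), i.e. 16414 - 16 = 16398
 * usable payload bytes per frame on a build where the header packs to
 * 16 bytes.
 */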
1450