// SPDX-License-Identifier: GPL-2.0
/*
 * Host side endpoint driver to implement Non-Transparent Bridge functionality
 *
 * Copyright (C) 2020 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/ntb.h>

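/*
 * Layout of the control region (ctrl_reg BAR) shared with the NTB endpoint
 * function driver: the host writes an argument and a command and then polls
 * NTB_EPF_CMD_STATUS until the endpoint reports OK or ERROR. The remaining
 * registers publish the resources (memory windows, scratchpads, doorbells)
 * provisioned on the endpoint side.
 */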
#define NTB_EPF_COMMAND		0x0
#define CMD_CONFIGURE_DOORBELL	1
#define CMD_TEARDOWN_DOORBELL	2
#define CMD_CONFIGURE_MW	3
#define CMD_TEARDOWN_MW		4
#define CMD_LINK_UP		5
#define CMD_LINK_DOWN		6

#define NTB_EPF_ARGUMENT	0x4
#define MSIX_ENABLE		BIT(16)

#define NTB_EPF_CMD_STATUS	0x8
#define COMMAND_STATUS_OK	1
#define COMMAND_STATUS_ERROR	2

#define NTB_EPF_LINK_STATUS	0x0A
#define LINK_STATUS_UP		BIT(0)

#define NTB_EPF_TOPOLOGY	0x0C
#define NTB_EPF_LOWER_ADDR	0x10
#define NTB_EPF_UPPER_ADDR	0x14
#define NTB_EPF_LOWER_SIZE	0x18
#define NTB_EPF_UPPER_SIZE	0x1C
#define NTB_EPF_MW_COUNT	0x20
#define NTB_EPF_MW1_OFFSET	0x24
#define NTB_EPF_SPAD_OFFSET	0x28
#define NTB_EPF_SPAD_COUNT	0x2C
#define NTB_EPF_DB_ENTRY_SIZE	0x30
#define NTB_EPF_DB_DATA(n)	(0x34 + (n) * 4)
#define NTB_EPF_DB_OFFSET(n)	(0xB4 + (n) * 4)

#define NTB_EPF_MIN_DB_COUNT	3
#define NTB_EPF_MAX_DB_COUNT	31
#define NTB_EPF_MW_OFFSET	2

#define NTB_EPF_COMMAND_TIMEOUT	1000 /* 1 sec */

enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};

struct ntb_epf_dev {
	struct ntb_dev ntb;
	struct device *dev;
	/* Protects command submission to the NTB endpoint function */
	struct mutex cmd_lock;

	enum pci_barno ctrl_reg_bar;
	enum pci_barno peer_spad_reg_bar;
	enum pci_barno db_reg_bar;

	unsigned int mw_count;
	unsigned int spad_count;
	unsigned int db_count;

	void __iomem *ctrl_reg;
	void __iomem *db_reg;
	void __iomem *peer_spad_reg;

	unsigned int self_spad;
	unsigned int peer_spad;

	int db_val;
	u64 db_valid_mask;
};

#define ntb_ndev(__ntb) container_of(__ntb, struct ntb_epf_dev, ntb)

struct ntb_epf_data {
	/* BAR that contains both control region and self spad region */
	enum pci_barno ctrl_reg_bar;
	/* BAR that contains peer spad region */
	enum pci_barno peer_spad_reg_bar;
	/* BAR that contains Doorbell region and Memory window '1' */
	enum pci_barno db_reg_bar;
};

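/*
 * ntb_epf_send_command() - post a command to the endpoint and wait for it
 * to complete. Commands are serialized by cmd_lock; completion is detected
 * by polling NTB_EPF_CMD_STATUS for up to NTB_EPF_COMMAND_TIMEOUT ms.
 */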
static int ntb_epf_send_command(struct ntb_epf_dev *ndev, u32 command,
				u32 argument)
{
	ktime_t timeout;
	bool timedout;
	int ret = 0;
	u32 status;

	mutex_lock(&ndev->cmd_lock);
	writel(argument, ndev->ctrl_reg + NTB_EPF_ARGUMENT);
	writel(command, ndev->ctrl_reg + NTB_EPF_COMMAND);

	timeout = ktime_add_ms(ktime_get(), NTB_EPF_COMMAND_TIMEOUT);
	while (1) {
		timedout = ktime_after(ktime_get(), timeout);
		status = readw(ndev->ctrl_reg + NTB_EPF_CMD_STATUS);

		if (status == COMMAND_STATUS_ERROR) {
			ret = -EINVAL;
			break;
		}

		if (status == COMMAND_STATUS_OK)
			break;

		if (WARN_ON(timedout)) {
			ret = -ETIMEDOUT;
			break;
		}

		usleep_range(5, 10);
	}

	writew(0, ndev->ctrl_reg + NTB_EPF_CMD_STATUS);
	mutex_unlock(&ndev->cmd_lock);

	return ret;
}

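/*
 * Memory window 'idx' is accessed through BAR 'idx + NTB_EPF_MW_OFFSET'.
 * With the default BAR layout, BAR_0 holds the control and self-scratchpad
 * region, BAR_1 the peer scratchpads, and BAR_2 onwards the doorbell region
 * and the memory windows.
 */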
static int ntb_epf_mw_to_bar(struct ntb_epf_dev *ndev, int idx)
{
	struct device *dev = ndev->dev;

	/* Valid memory window indices are 0 .. mw_count - 1 */
	if (idx < 0 || idx >= ndev->mw_count) {
		dev_err(dev, "Unsupported Memory Window index %d\n", idx);
		return -EINVAL;
	}

	return idx + NTB_EPF_MW_OFFSET;
}

static int ntb_epf_mw_count(struct ntb_dev *ntb, int pidx)
{
	struct ntb_epf_dev *ndev = ntb_ndev(ntb);
	struct device *dev = ndev->dev;

	if (pidx != NTB_DEF_PEER_IDX) {
		dev_err(dev, "Unsupported Peer ID %d\n", pidx);
		return -EINVAL;
	}

	return ndev->mw_count;
}

static int ntb_epf_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
				resource_size_t *addr_align,
				resource_size_t *size_align,
				resource_size_t *size_max)
{
	struct ntb_epf_dev *ndev = ntb_ndev(ntb);
	struct device *dev = ndev->dev;
	int bar;

	if (pidx != NTB_DEF_PEER_IDX) {
		dev_err(dev, "Unsupported Peer ID %d\n", pidx);
		return -EINVAL;
	}

	bar = ntb_epf_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	if (addr_align)
		*addr_align = SZ_4K;

	if (size_align)
		*size_align = 1;

	if (size_max)
		*size_max = pci_resource_len(ndev->ntb.pdev, bar);

	return 0;
}

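/*
 * The endpoint publishes its link state in NTB_EPF_LINK_STATUS; link speed
 * and width are not reported, so the output parameters are left untouched.
 */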
static u64 ntb_epf_link_is_up(struct ntb_dev *ntb,
			      enum ntb_speed *speed,
			      enum ntb_width *width)
{
	struct ntb_epf_dev *ndev = ntb_ndev(ntb);
	u32 status;

	status = readw(ndev->ctrl_reg + NTB_EPF_LINK_STATUS);

	return status & LINK_STATUS_UP;
}

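/*
 * Self scratchpads live in the control BAR at the runtime offset published
 * in NTB_EPF_SPAD_OFFSET, four bytes per scratchpad.
 */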
static u32 ntb_epf_spad_read(struct ntb_dev *ntb, int idx)
{
	struct ntb_epf_dev *ndev = ntb_ndev(ntb);
	struct device *dev = ndev->dev;
	u32 offset;

	if (idx < 0 || idx >= ndev->spad_count) {
		dev_err(dev, "READ: Invalid ScratchPad Index %d\n", idx);
		return 0;
	}

	offset = readl(ndev->ctrl_reg + NTB_EPF_SPAD_OFFSET);
	offset += (idx << 2);

	return readl(ndev->ctrl_reg + offset);
}

static int ntb_epf_spad_write(struct ntb_dev *ntb,
			      int idx, u32 val)
{
	struct ntb_epf_dev *ndev = ntb_ndev(ntb);
	struct device *dev = ndev->dev;
	u32 offset;

	if (idx < 0 || idx >= ndev->spad_count) {
		dev_err(dev, "WRITE: Invalid ScratchPad Index %d\n", idx);
		return -EINVAL;
	}

	offset = readl(ndev->ctrl_reg + NTB_EPF_SPAD_OFFSET);
	offset += (idx << 2);
	writel(val, ndev->ctrl_reg + offset);

	return 0;
}

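/*
 * Peer scratchpads are mapped directly through the peer_spad BAR, so reads
 * and writes reach the remote side without a command round trip.
 */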
static u32 ntb_epf_peer_spad_read(struct ntb_dev *ntb, int pidx, int idx)
{
	struct ntb_epf_dev *ndev = ntb_ndev(ntb);
	struct device *dev = ndev->dev;
	u32 offset;

	/* Scratchpad reads have no error path; return 0 on invalid input */
	if (pidx != NTB_DEF_PEER_IDX) {
		dev_err(dev, "Unsupported Peer ID %d\n", pidx);
		return 0;
	}

	if (idx < 0 || idx >= ndev->spad_count) {
		dev_err(dev, "READ: Invalid Peer ScratchPad Index %d\n", idx);
		return 0;
	}

	offset = (idx << 2);
	return readl(ndev->peer_spad_reg + offset);
}

static int ntb_epf_peer_spad_write(struct ntb_dev *ntb, int pidx,
				   int idx, u32 val)
{
	struct ntb_epf_dev *ndev = ntb_ndev(ntb);
	struct device *dev = ndev->dev;
	u32 offset;

	if (pidx != NTB_DEF_PEER_IDX) {
		dev_err(dev, "Unsupported Peer ID %d\n", pidx);
		return -EINVAL;
	}

	if (idx < 0 || idx >= ndev->spad_count) {
		dev_err(dev, "WRITE: Invalid Peer ScratchPad Index %d\n", idx);
		return -EINVAL;
	}

	offset = (idx << 2);
	writel(val, ndev->peer_spad_reg + offset);

	return 0;
}

static int ntb_epf_link_enable(struct ntb_dev *ntb,
			       enum ntb_speed max_speed,
			       enum ntb_width max_width)
{
	struct ntb_epf_dev *ndev = ntb_ndev(ntb);
	struct device *dev = ndev->dev;
	int ret;

	ret = ntb_epf_send_command(ndev, CMD_LINK_UP, 0);
	if (ret) {
		dev_err(dev, "Failed to enable link\n");
		return ret;
	}

	return 0;
}

static int ntb_epf_link_disable(struct ntb_dev *ntb)
{
	struct ntb_epf_dev *ndev = ntb_ndev(ntb);
	struct device *dev = ndev->dev;
	int ret;

	ret = ntb_epf_send_command(ndev, CMD_LINK_DOWN, 0);
	if (ret) {
		dev_err(dev, "Failed to disable link\n");
		return ret;
	}

	return 0;
}

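/*
 * Vector 0 signals link events; vectors 1..db_count signal doorbells.
 * db_val records the last vector that fired (plus one) for db_read().
 */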
static irqreturn_t ntb_epf_vec_isr(int irq, void *dev)
{
	struct ntb_epf_dev *ndev = dev;
	int irq_no;

	irq_no = irq - pci_irq_vector(ndev->ntb.pdev, 0);
	ndev->db_val = irq_no + 1;

	if (irq_no == 0)
		ntb_link_event(&ndev->ntb);
	else
		ntb_db_event(&ndev->ntb, irq_no);

	return IRQ_HANDLED;
}

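/*
 * Allocate MSI-X vectors if available, falling back to MSI. The MSIX_ENABLE
 * flag in the CMD_CONFIGURE_DOORBELL argument tells the endpoint which kind
 * of interrupt to raise; the low bits carry the vector count.
 */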
static int ntb_epf_init_isr(struct ntb_epf_dev *ndev, int msi_min, int msi_max)
{
	struct pci_dev *pdev = ndev->ntb.pdev;
	struct device *dev = ndev->dev;
	u32 argument = MSIX_ENABLE;
	int irq;
	int ret;
	int i;

	irq = pci_alloc_irq_vectors(pdev, msi_min, msi_max, PCI_IRQ_MSIX);
	if (irq < 0) {
		dev_dbg(dev, "Failed to get MSIX interrupts\n");
		irq = pci_alloc_irq_vectors(pdev, msi_min, msi_max,
					    PCI_IRQ_MSI);
		if (irq < 0) {
			dev_err(dev, "Failed to get MSI interrupts\n");
			return irq;
		}
		argument &= ~MSIX_ENABLE;
	}

	for (i = 0; i < irq; i++) {
		ret = request_irq(pci_irq_vector(pdev, i), ntb_epf_vec_isr,
				  0, "ntb_epf", ndev);
		if (ret) {
			dev_err(dev, "Failed to request irq\n");
			goto err_request_irq;
		}
	}

	/* Vector 0 handles link events, the rest are doorbells */
	ndev->db_count = irq - 1;

	ret = ntb_epf_send_command(ndev, CMD_CONFIGURE_DOORBELL,
				   argument | irq);
	if (ret) {
		dev_err(dev, "Failed to configure doorbell\n");
		goto err_request_irq;
	}

	return 0;

err_request_irq:
	/* Free only the vectors that were successfully requested */
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), ndev);

	pci_free_irq_vectors(pdev);

	return ret;
}

static int ntb_epf_peer_mw_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->mw_count;
}

static int ntb_epf_spad_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->spad_count;
}

static u64 ntb_epf_db_valid_mask(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->db_valid_mask;
}

static int ntb_epf_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	return 0;
}

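/*
 * Program the inbound translation for memory window 'idx': the host buffer
 * address and size are written to the control region, then CMD_CONFIGURE_MW
 * asks the endpoint to set up the mapping.
 */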
static int ntb_epf_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
				dma_addr_t addr, resource_size_t size)
{
	struct ntb_epf_dev *ndev = ntb_ndev(ntb);
	struct device *dev = ndev->dev;
	resource_size_t mw_size;
	int bar;
	int ret;

	if (pidx != NTB_DEF_PEER_IDX) {
		dev_err(dev, "Unsupported Peer ID %d\n", pidx);
		return -EINVAL;
	}

	bar = ntb_epf_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	mw_size = pci_resource_len(ntb->pdev, bar);

	if (size > mw_size) {
		dev_err(dev, "Size:%pa is greater than the MW size %pa\n",
			&size, &mw_size);
		return -EINVAL;
	}

	writel(lower_32_bits(addr), ndev->ctrl_reg + NTB_EPF_LOWER_ADDR);
	writel(upper_32_bits(addr), ndev->ctrl_reg + NTB_EPF_UPPER_ADDR);
	writel(lower_32_bits(size), ndev->ctrl_reg + NTB_EPF_LOWER_SIZE);
	writel(upper_32_bits(size), ndev->ctrl_reg + NTB_EPF_UPPER_SIZE);

	ret = ntb_epf_send_command(ndev, CMD_CONFIGURE_MW, idx);
	if (ret)
		dev_err(dev, "Failed to configure memory window\n");

	return ret;
}

static int ntb_epf_mw_clear_trans(struct ntb_dev *ntb, int pidx, int idx)
{
	struct ntb_epf_dev *ndev = ntb_ndev(ntb);
	struct device *dev = ndev->dev;
	int ret;

	ret = ntb_epf_send_command(ndev, CMD_TEARDOWN_MW, idx);
	if (ret)
		dev_err(dev, "Failed to teardown memory window\n");

	return ret;
}

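/*
 * Memory window '1' shares its BAR with the doorbell region, so its base is
 * shifted by the NTB_EPF_MW1_OFFSET published by the endpoint.
 */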
static int ntb_epf_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
				    phys_addr_t *base, resource_size_t *size)
{
	struct ntb_epf_dev *ndev = ntb_ndev(ntb);
	u32 offset = 0;
	int bar;

	if (idx == 0)
		offset = readl(ndev->ctrl_reg + NTB_EPF_MW1_OFFSET);

	bar = idx + NTB_EPF_MW_OFFSET;

	if (base)
		*base = pci_resource_start(ndev->ntb.pdev, bar) + offset;

	if (size)
		*size = pci_resource_len(ndev->ntb.pdev, bar) - offset;

	return 0;
}

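/*
 * Ring a doorbell on the peer: the data word and offset for the chosen
 * vector are read back from the control region and the data is written
 * into the doorbell BAR at that offset.
 */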
static int ntb_epf_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct ntb_epf_dev *ndev = ntb_ndev(ntb);
	u32 interrupt_num = ffs(db_bits) + 1;
	struct device *dev = ndev->dev;
	u32 db_entry_size;
	u32 db_offset;
	u32 db_data;

	if (interrupt_num > ndev->db_count) {
		dev_err(dev, "DB interrupt %d greater than Max Supported %d\n",
			interrupt_num, ndev->db_count);
		return -EINVAL;
	}

	db_entry_size = readl(ndev->ctrl_reg + NTB_EPF_DB_ENTRY_SIZE);

	db_data = readl(ndev->ctrl_reg + NTB_EPF_DB_DATA(interrupt_num));
	db_offset = readl(ndev->ctrl_reg + NTB_EPF_DB_OFFSET(interrupt_num));
	writel(db_data, ndev->db_reg + (db_entry_size * interrupt_num) +
	       db_offset);

	return 0;
}

static u64 ntb_epf_db_read(struct ntb_dev *ntb)
{
	struct ntb_epf_dev *ndev = ntb_ndev(ntb);

	return ndev->db_val;
}

static int ntb_epf_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
	return 0;
}

static int ntb_epf_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct ntb_epf_dev *ndev = ntb_ndev(ntb);

	ndev->db_val = 0;

	return 0;
}

static const struct ntb_dev_ops ntb_epf_ops = {
	.mw_count		= ntb_epf_mw_count,
	.spad_count		= ntb_epf_spad_count,
	.peer_mw_count		= ntb_epf_peer_mw_count,
	.db_valid_mask		= ntb_epf_db_valid_mask,
	.db_set_mask		= ntb_epf_db_set_mask,
	.mw_set_trans		= ntb_epf_mw_set_trans,
	.mw_clear_trans		= ntb_epf_mw_clear_trans,
	.peer_mw_get_addr	= ntb_epf_peer_mw_get_addr,
	.link_enable		= ntb_epf_link_enable,
	.spad_read		= ntb_epf_spad_read,
	.spad_write		= ntb_epf_spad_write,
	.peer_spad_read		= ntb_epf_peer_spad_read,
	.peer_spad_write	= ntb_epf_peer_spad_write,
	.peer_db_set		= ntb_epf_peer_db_set,
	.db_read		= ntb_epf_db_read,
	.mw_get_align		= ntb_epf_mw_get_align,
	.link_is_up		= ntb_epf_link_is_up,
	.db_clear_mask		= ntb_epf_db_clear_mask,
	.db_clear		= ntb_epf_db_clear,
	.link_disable		= ntb_epf_link_disable,
};

static inline void ntb_epf_init_struct(struct ntb_epf_dev *ndev,
				       struct pci_dev *pdev)
{
	ndev->ntb.pdev = pdev;
	ndev->ntb.topo = NTB_TOPO_NONE;
	ndev->ntb.ops = &ntb_epf_ops;
}

static int ntb_epf_init_dev(struct ntb_epf_dev *ndev)
{
	struct device *dev = ndev->dev;
	int ret;

	/* One link-event interrupt, the rest are doorbell interrupts */
	ret = ntb_epf_init_isr(ndev, NTB_EPF_MIN_DB_COUNT + 1,
			       NTB_EPF_MAX_DB_COUNT + 1);
	if (ret) {
		dev_err(dev, "Failed to init ISR\n");
		return ret;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
	ndev->mw_count = readl(ndev->ctrl_reg + NTB_EPF_MW_COUNT);
	ndev->spad_count = readl(ndev->ctrl_reg + NTB_EPF_SPAD_COUNT);

	return 0;
}

static int ntb_epf_init_pci(struct ntb_epf_dev *ndev,
			    struct pci_dev *pdev)
{
	struct device *dev = ndev->dev;
	int ret;

	pci_set_drvdata(pdev, ndev);

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(dev, "Cannot enable PCI device\n");
		goto err_pci_enable;
	}

	ret = pci_request_regions(pdev, "ntb");
	if (ret) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_pci_regions;
	}

	pci_set_master(pdev);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(dev, "Cannot set DMA mask\n");
			goto err_dma_mask;
		}
		dev_warn(dev, "Cannot DMA highmem\n");
	}

	ndev->ctrl_reg = pci_iomap(pdev, ndev->ctrl_reg_bar, 0);
	if (!ndev->ctrl_reg) {
		ret = -EIO;
		goto err_dma_mask;
	}

	ndev->peer_spad_reg = pci_iomap(pdev, ndev->peer_spad_reg_bar, 0);
	if (!ndev->peer_spad_reg) {
		ret = -EIO;
		goto err_iomap_ctrl;
	}

	ndev->db_reg = pci_iomap(pdev, ndev->db_reg_bar, 0);
	if (!ndev->db_reg) {
		ret = -EIO;
		goto err_iomap_peer_spad;
	}

	return 0;

err_iomap_peer_spad:
	pci_iounmap(pdev, ndev->peer_spad_reg);

err_iomap_ctrl:
	pci_iounmap(pdev, ndev->ctrl_reg);

err_dma_mask:
	pci_clear_master(pdev);
	pci_release_regions(pdev);

err_pci_regions:
	pci_disable_device(pdev);

err_pci_enable:
	pci_set_drvdata(pdev, NULL);

	return ret;
}

static void ntb_epf_deinit_pci(struct ntb_epf_dev *ndev)
{
	struct pci_dev *pdev = ndev->ntb.pdev;

	pci_iounmap(pdev, ndev->ctrl_reg);
	pci_iounmap(pdev, ndev->peer_spad_reg);
	pci_iounmap(pdev, ndev->db_reg);

	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static void ntb_epf_cleanup_isr(struct ntb_epf_dev *ndev)
{
	struct pci_dev *pdev = ndev->ntb.pdev;
	int i;

	ntb_epf_send_command(ndev, CMD_TEARDOWN_DOORBELL, ndev->db_count + 1);

	for (i = 0; i < ndev->db_count + 1; i++)
		free_irq(pci_irq_vector(pdev, i), ndev);
	pci_free_irq_vectors(pdev);
}

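/*
 * Default BAR assignment, matching the J721E layout; an alternative layout
 * can be supplied through the pci_device_id driver_data.
 */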
static int ntb_epf_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	enum pci_barno peer_spad_reg_bar = BAR_1;
	enum pci_barno ctrl_reg_bar = BAR_0;
	enum pci_barno db_reg_bar = BAR_2;
	struct device *dev = &pdev->dev;
	struct ntb_epf_data *data;
	struct ntb_epf_dev *ndev;
	int ret;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	ndev = devm_kzalloc(dev, sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		return -ENOMEM;

	data = (struct ntb_epf_data *)id->driver_data;
	if (data) {
		if (data->peer_spad_reg_bar)
			peer_spad_reg_bar = data->peer_spad_reg_bar;
		if (data->ctrl_reg_bar)
			ctrl_reg_bar = data->ctrl_reg_bar;
		if (data->db_reg_bar)
			db_reg_bar = data->db_reg_bar;
	}

	ndev->peer_spad_reg_bar = peer_spad_reg_bar;
	ndev->ctrl_reg_bar = ctrl_reg_bar;
	ndev->db_reg_bar = db_reg_bar;
	ndev->dev = dev;

	ntb_epf_init_struct(ndev, pdev);
	mutex_init(&ndev->cmd_lock);

	ret = ntb_epf_init_pci(ndev, pdev);
	if (ret) {
		dev_err(dev, "Failed to init PCI\n");
		return ret;
	}

	ret = ntb_epf_init_dev(ndev);
	if (ret) {
		dev_err(dev, "Failed to init device\n");
		goto err_init_dev;
	}

	ret = ntb_register_device(&ndev->ntb);
	if (ret) {
		dev_err(dev, "Failed to register NTB device\n");
		goto err_register_dev;
	}

	return 0;

err_register_dev:
	ntb_epf_cleanup_isr(ndev);

err_init_dev:
	ntb_epf_deinit_pci(ndev);

	return ret;
}

static void ntb_epf_pci_remove(struct pci_dev *pdev)
{
	struct ntb_epf_dev *ndev = pci_get_drvdata(pdev);

	ntb_unregister_device(&ndev->ntb);
	ntb_epf_cleanup_isr(ndev);
	ntb_epf_deinit_pci(ndev);
}

static const struct ntb_epf_data j721e_data = {
	.ctrl_reg_bar = BAR_0,
	.peer_spad_reg_bar = BAR_1,
	.db_reg_bar = BAR_2,
};

static const struct pci_device_id ntb_epf_pci_tbl[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
		.class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00,
		.driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ },
};

static struct pci_driver ntb_epf_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= ntb_epf_pci_tbl,
	.probe		= ntb_epf_pci_probe,
	.remove		= ntb_epf_pci_remove,
};
module_pci_driver(ntb_epf_pci_driver);

MODULE_DESCRIPTION("PCI ENDPOINT NTB HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");