1 /*
2  * Microsemi Switchtec(tm) PCIe Management Driver
3  * Copyright (c) 2017, Microsemi Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  */
15 
16 #include <linux/switchtec.h>
17 #include <linux/module.h>
18 #include <linux/delay.h>
19 #include <linux/kthread.h>
20 #include <linux/interrupt.h>
21 #include <linux/ntb.h>
22 
23 MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
24 MODULE_VERSION("0.1");
25 MODULE_LICENSE("GPL");
26 MODULE_AUTHOR("Microsemi Corporation");
27 
28 static ulong max_mw_size = SZ_2M;
29 module_param(max_mw_size, ulong, 0644);
30 MODULE_PARM_DESC(max_mw_size,
31 	"Max memory window size reported to the upper layer");
32 
33 static bool use_lut_mws;
34 module_param(use_lut_mws, bool, 0644);
35 MODULE_PARM_DESC(use_lut_mws,
36 		 "Enable the use of the LUT based memory windows");
37 
38 #ifndef ioread64
39 #ifdef readq
40 #define ioread64 readq
41 #else
42 #define ioread64 _ioread64
43 static inline u64 _ioread64(void __iomem *mmio)
44 {
45 	u64 low, high;
46 
47 	low = ioread32(mmio);
48 	high = ioread32(mmio + sizeof(u32));
49 	return low | (high << 32);
50 }
51 #endif
52 #endif
53 
54 #ifndef iowrite64
55 #ifdef writeq
56 #define iowrite64 writeq
57 #else
58 #define iowrite64 _iowrite64
59 static inline void _iowrite64(u64 val, void __iomem *mmio)
60 {
61 	iowrite32(val, mmio);
62 	iowrite32(val >> 32, mmio + sizeof(u32));
63 }
64 #endif
65 #endif
66 
67 #define SWITCHTEC_NTB_MAGIC 0x45CC0001
68 #define MAX_MWS     128
69 
/*
 * Layout of the page each side shares with its peer through LUT entry 0.
 * All cross-partition bookkeeping (link state, advertised window sizes,
 * scratchpads) lives here.
 */
struct shared_mw {
	u32 magic;	/* SWITCHTEC_NTB_MAGIC; the peer reads magic+link_sta as one 64-bit value */
	u32 link_sta;	/* nonzero while this side has its link enabled */
	u32 partition_id;	/* partition this side resides in */
	u64 mw_sizes[MAX_MWS];	/* advertised size per memory window; 0 means unused */
	u32 spad[128];	/* scratchpad registers exposed to the peer */
};
77 
78 #define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry)
79 #define LUT_SIZE SZ_64K
80 
/* Per-device state for one Switchtec NTB instance. */
struct switchtec_ntb {
	struct ntb_dev ntb;		/* generic NTB device (must embed for container_of) */
	struct switchtec_dev *stdev;	/* underlying switchtec management device */

	int self_partition;		/* our partition id */
	int peer_partition;		/* the (single) peer partition id */

	int doorbell_irq;		/* Linux IRQ for doorbell events */
	int message_irq;		/* Linux IRQ for message events */

	/* register block pointers carved out of stdev->mmio_ntb */
	struct ntb_info_regs __iomem *mmio_ntb;
	struct ntb_ctrl_regs __iomem *mmio_ctrl;
	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
	struct ntb_ctrl_regs __iomem *mmio_self_ctrl;
	struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
	struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg;

	struct shared_mw *self_shared;		/* our shared page (DMA coherent) */
	struct shared_mw __iomem *peer_shared;	/* peer's shared page, mapped via LUT 0 */
	dma_addr_t self_shared_dma;		/* bus address of self_shared */

	u64 db_mask;		/* cached doorbell interrupt mask (shadow of hw reg) */
	u64 db_valid_mask;	/* doorbell bits usable by this partition */
	int db_shift;		/* bit offset of our half of the 64 shared doorbells */
	int db_peer_shift;	/* bit offset of the peer's half */

	/* synchronize rmw access of db_mask and hw reg */
	spinlock_t db_mask_lock;

	int nr_direct_mw;	/* direct (BAR) windows we expose */
	int nr_lut_mw;		/* LUT windows we expose (entry 0 reserved) */
	int direct_mw_to_bar[MAX_DIRECT_MW];	/* window index -> BAR number */

	int peer_nr_direct_mw;	/* direct windows the peer exposes */
	int peer_nr_lut_mw;	/* LUT windows the peer exposes */
	int peer_direct_mw_to_bar[MAX_DIRECT_MW];

	bool link_is_up;		/* cached link state from check_link() */
	enum ntb_speed link_speed;	/* cached negotiated speed (min of both sides) */
	enum ntb_width link_width;	/* cached negotiated width (min of both sides) */
};
122 
/* Convert a generic ntb_dev pointer back to its containing switchtec_ntb. */
static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
{
	return container_of(ntb, struct switchtec_ntb, ntb);
}
127 
128 static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
129 				 struct ntb_ctrl_regs __iomem *ctl,
130 				 u32 op, int wait_status)
131 {
132 	static const char * const op_text[] = {
133 		[NTB_CTRL_PART_OP_LOCK] = "lock",
134 		[NTB_CTRL_PART_OP_CFG] = "configure",
135 		[NTB_CTRL_PART_OP_RESET] = "reset",
136 	};
137 
138 	int i;
139 	u32 ps;
140 	int status;
141 
142 	switch (op) {
143 	case NTB_CTRL_PART_OP_LOCK:
144 		status = NTB_CTRL_PART_STATUS_LOCKING;
145 		break;
146 	case NTB_CTRL_PART_OP_CFG:
147 		status = NTB_CTRL_PART_STATUS_CONFIGURING;
148 		break;
149 	case NTB_CTRL_PART_OP_RESET:
150 		status = NTB_CTRL_PART_STATUS_RESETTING;
151 		break;
152 	default:
153 		return -EINVAL;
154 	}
155 
156 	iowrite32(op, &ctl->partition_op);
157 
158 	for (i = 0; i < 1000; i++) {
159 		if (msleep_interruptible(50) != 0) {
160 			iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op);
161 			return -EINTR;
162 		}
163 
164 		ps = ioread32(&ctl->partition_status) & 0xFFFF;
165 
166 		if (ps != status)
167 			break;
168 	}
169 
170 	if (ps == wait_status)
171 		return 0;
172 
173 	if (ps == status) {
174 		dev_err(&sndev->stdev->dev,
175 			"Timed out while peforming %s (%d). (%08x)",
176 			op_text[op], op,
177 			ioread32(&ctl->partition_status));
178 
179 		return -ETIMEDOUT;
180 	}
181 
182 	return -EIO;
183 }
184 
185 static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx,
186 				  u32 val)
187 {
188 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_self_dbmsg->omsg))
189 		return -EINVAL;
190 
191 	iowrite32(val, &sndev->mmio_self_dbmsg->omsg[idx].msg);
192 
193 	return 0;
194 }
195 
196 static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
197 {
198 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
199 	int nr_direct_mw = sndev->peer_nr_direct_mw;
200 	int nr_lut_mw = sndev->peer_nr_lut_mw - 1;
201 
202 	if (pidx != NTB_DEF_PEER_IDX)
203 		return -EINVAL;
204 
205 	if (!use_lut_mws)
206 		nr_lut_mw = 0;
207 
208 	return nr_direct_mw + nr_lut_mw;
209 }
210 
211 static int lut_index(struct switchtec_ntb *sndev, int mw_idx)
212 {
213 	return mw_idx - sndev->nr_direct_mw + 1;
214 }
215 
216 static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx)
217 {
218 	return mw_idx - sndev->peer_nr_direct_mw + 1;
219 }
220 
221 static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx,
222 				      int widx, resource_size_t *addr_align,
223 				      resource_size_t *size_align,
224 				      resource_size_t *size_max)
225 {
226 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
227 	int lut;
228 	resource_size_t size;
229 
230 	if (pidx != NTB_DEF_PEER_IDX)
231 		return -EINVAL;
232 
233 	lut = widx >= sndev->peer_nr_direct_mw;
234 	size = ioread64(&sndev->peer_shared->mw_sizes[widx]);
235 
236 	if (size == 0)
237 		return -EINVAL;
238 
239 	if (addr_align)
240 		*addr_align = lut ? size : SZ_4K;
241 
242 	if (size_align)
243 		*size_align = lut ? size : SZ_4K;
244 
245 	if (size_max)
246 		*size_max = size;
247 
248 	return 0;
249 }
250 
/*
 * Disable peer direct window @idx: clear the direct-window enable bit,
 * zero the window size, and rewrite the translation so only our
 * partition id remains in the xlate register.
 */
static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	int bar = sndev->peer_direct_mw_to_bar[idx];
	u32 ctl_val;

	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
	ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
	iowrite32(0, &ctl->bar_entry[bar].win_size);
	/* xlate_addr keeps the destination partition in its low bits */
	iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
}
263 
/* Disable peer LUT window @idx by zeroing its LUT entry. */
static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;

	iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]);
}
270 
/*
 * Program peer direct window @idx to translate to @addr.  The window
 * size is encoded as ilog2(size) in the low bits of win_size; the
 * caller guarantees size >= 4K and that the low bits of size are clear.
 *
 * NOTE(review): win_size is written via iowrite32, so a size of 4GB or
 * more would be truncated here -- confirm against the hardware docs
 * whether an additional high-size register is required for such windows.
 */
static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
					dma_addr_t addr, resource_size_t size)
{
	int xlate_pos = ilog2(size);
	int bar = sndev->peer_direct_mw_to_bar[idx];
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	u32 ctl_val;

	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
	ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;

	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
	iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
	/* destination partition id is carried in the low bits of the address */
	iowrite64(sndev->self_partition | addr,
		  &ctl->bar_entry[bar].xlate_addr);
}
287 
/*
 * Program peer LUT window @idx to translate to @addr.  The entry packs
 * the enable bit, the destination partition (shifted by one), and the
 * DMA address; @size is fixed at LUT_SIZE and thus unused here.
 */
static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx,
				     dma_addr_t addr, resource_size_t size)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;

	iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr),
		  &ctl->lut_entry[peer_lut_index(sndev, idx)]);
}
296 
297 static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
298 				      dma_addr_t addr, resource_size_t size)
299 {
300 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
301 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
302 	int xlate_pos = ilog2(size);
303 	int nr_direct_mw = sndev->peer_nr_direct_mw;
304 	int rc;
305 
306 	if (pidx != NTB_DEF_PEER_IDX)
307 		return -EINVAL;
308 
309 	dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap",
310 		widx, pidx, &addr, &size);
311 
312 	if (widx >= switchtec_ntb_mw_count(ntb, pidx))
313 		return -EINVAL;
314 
315 	if (xlate_pos < 12)
316 		return -EINVAL;
317 
318 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
319 				   NTB_CTRL_PART_STATUS_LOCKED);
320 	if (rc)
321 		return rc;
322 
323 	if (addr == 0 || size == 0) {
324 		if (widx < nr_direct_mw)
325 			switchtec_ntb_mw_clr_direct(sndev, widx);
326 		else
327 			switchtec_ntb_mw_clr_lut(sndev, widx);
328 	} else {
329 		if (widx < nr_direct_mw)
330 			switchtec_ntb_mw_set_direct(sndev, widx, addr, size);
331 		else
332 			switchtec_ntb_mw_set_lut(sndev, widx, addr, size);
333 	}
334 
335 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
336 				   NTB_CTRL_PART_STATUS_NORMAL);
337 
338 	if (rc == -EIO) {
339 		dev_err(&sndev->stdev->dev,
340 			"Hardware reported an error configuring mw %d: %08x",
341 			widx, ioread32(&ctl->bar_error));
342 
343 		if (widx < nr_direct_mw)
344 			switchtec_ntb_mw_clr_direct(sndev, widx);
345 		else
346 			switchtec_ntb_mw_clr_lut(sndev, widx);
347 
348 		switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
349 				      NTB_CTRL_PART_STATUS_NORMAL);
350 	}
351 
352 	return rc;
353 }
354 
355 static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb)
356 {
357 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
358 
359 	return sndev->nr_direct_mw + (use_lut_mws ? sndev->nr_lut_mw - 1 : 0);
360 }
361 
/*
 * Report the physical base address and usable size of direct window
 * @idx.  Window 0 shares its BAR with the LUT region, so its base is
 * pushed past the LUT entries and its size capped accordingly.
 */
static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev,
					 int idx, phys_addr_t *base,
					 resource_size_t *size)
{
	int bar = sndev->direct_mw_to_bar[idx];
	size_t offset = 0;

	if (bar < 0)
		return -EINVAL;

	if (idx == 0) {
		/*
		 * This is the direct BAR shared with the LUTs
		 * which means the actual window will be offset
		 * by the size of all the LUT entries.
		 */

		offset = LUT_SIZE * sndev->nr_lut_mw;
	}

	if (base)
		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;

	if (size) {
		*size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
		/* cap window 0 to the size of the LUT region it follows */
		if (offset && *size > offset)
			*size = offset;

		/* honour the module-parameter limit */
		if (*size > max_mw_size)
			*size = max_mw_size;
	}

	return 0;
}
396 
397 static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev,
398 				      int idx, phys_addr_t *base,
399 				      resource_size_t *size)
400 {
401 	int bar = sndev->direct_mw_to_bar[0];
402 	int offset;
403 
404 	offset = LUT_SIZE * lut_index(sndev, idx);
405 
406 	if (base)
407 		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
408 
409 	if (size)
410 		*size = LUT_SIZE;
411 
412 	return 0;
413 }
414 
415 static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
416 					  phys_addr_t *base,
417 					  resource_size_t *size)
418 {
419 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
420 
421 	if (idx < sndev->nr_direct_mw)
422 		return switchtec_ntb_direct_get_addr(sndev, idx, base, size);
423 	else if (idx < switchtec_ntb_peer_mw_count(ntb))
424 		return switchtec_ntb_lut_get_addr(sndev, idx, base, size);
425 	else
426 		return -EINVAL;
427 }
428 
/*
 * Read the negotiated PCIe link speed/width for @partition via its
 * vendor-endpoint PFF link-status capability register.
 *
 * NOTE(review): pci_cap_region[13] and the bit positions below appear
 * to address the PCIe Link Status register fields (speed in bits
 * 19:16, width in bits 25:20) -- confirm against the Switchtec
 * register documentation.
 */
static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
					  int partition,
					  enum ntb_speed *speed,
					  enum ntb_width *width)
{
	struct switchtec_dev *stdev = sndev->stdev;

	u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id);
	u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);

	if (speed)
		*speed = (linksta >> 16) & 0xF;

	if (width)
		*width = (linksta >> 20) & 0x3F;
}
445 
446 static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev)
447 {
448 	enum ntb_speed self_speed, peer_speed;
449 	enum ntb_width self_width, peer_width;
450 
451 	if (!sndev->link_is_up) {
452 		sndev->link_speed = NTB_SPEED_NONE;
453 		sndev->link_width = NTB_WIDTH_NONE;
454 		return;
455 	}
456 
457 	switchtec_ntb_part_link_speed(sndev, sndev->self_partition,
458 				      &self_speed, &self_width);
459 	switchtec_ntb_part_link_speed(sndev, sndev->peer_partition,
460 				      &peer_speed, &peer_width);
461 
462 	sndev->link_speed = min(self_speed, peer_speed);
463 	sndev->link_width = min(self_width, peer_width);
464 }
465 
/* Message register assignment and the values exchanged over it. */
enum {
	LINK_MESSAGE = 0,	/* message index used for link events */
	MSG_LINK_UP = 1,	/* this side has enabled its link */
	MSG_LINK_DOWN = 2,	/* this side has disabled its link */
	MSG_CHECK_LINK = 3,	/* ask the peer to re-evaluate link state */
};
472 
/*
 * Re-evaluate the logical link state.  The link is up only when our
 * side is enabled AND the peer's shared page carries the magic value
 * with a nonzero link_sta (both read atomically as one 64-bit value).
 * On a state change, notify the peer and the NTB core.
 */
static void switchtec_ntb_check_link(struct switchtec_ntb *sndev)
{
	int link_sta;
	int old = sndev->link_is_up;

	link_sta = sndev->self_shared->link_sta;
	if (link_sta) {
		/* low 32 bits: magic; high 32 bits: peer's link_sta */
		u64 peer = ioread64(&sndev->peer_shared->magic);

		if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC)
			link_sta = peer >> 32;
		else
			link_sta = 0;
	}

	sndev->link_is_up = link_sta;
	switchtec_ntb_set_link_speed(sndev);

	if (link_sta != old) {
		switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK);
		ntb_link_event(&sndev->ntb);
		dev_info(&sndev->stdev->dev, "ntb link %s",
			 link_sta ? "up" : "down");
	}
}
498 
499 static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
500 {
501 	struct switchtec_ntb *sndev = stdev->sndev;
502 
503 	switchtec_ntb_check_link(sndev);
504 }
505 
506 static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb,
507 				    enum ntb_speed *speed,
508 				    enum ntb_width *width)
509 {
510 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
511 
512 	if (speed)
513 		*speed = sndev->link_speed;
514 	if (width)
515 		*width = sndev->link_width;
516 
517 	return sndev->link_is_up;
518 }
519 
/*
 * Bring the local side of the link up: advertise link_sta in our
 * shared page, notify the peer, and re-evaluate the combined state.
 * max_speed/max_width hints are not supported by this hardware and
 * are ignored.
 */
static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
				     enum ntb_speed max_speed,
				     enum ntb_width max_width)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	dev_dbg(&sndev->stdev->dev, "enabling link");

	sndev->self_shared->link_sta = 1;
	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);

	switchtec_ntb_check_link(sndev);

	return 0;
}
535 
536 static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
537 {
538 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
539 
540 	dev_dbg(&sndev->stdev->dev, "disabling link");
541 
542 	sndev->self_shared->link_sta = 0;
543 	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
544 
545 	switchtec_ntb_check_link(sndev);
546 
547 	return 0;
548 }
549 
550 static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb)
551 {
552 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
553 
554 	return sndev->db_valid_mask;
555 }
556 
/* All doorbells are funnelled into a single interrupt vector. */
static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb)
{
	return 1;
}
561 
562 static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
563 {
564 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
565 
566 	if (db_vector < 0 || db_vector > 1)
567 		return 0;
568 
569 	return sndev->db_valid_mask;
570 }
571 
572 static u64 switchtec_ntb_db_read(struct ntb_dev *ntb)
573 {
574 	u64 ret;
575 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
576 
577 	ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift;
578 
579 	return ret & sndev->db_valid_mask;
580 }
581 
/*
 * Clear the given inbound doorbell bits by writing them back to the
 * idb register, shifted into our partition's half.
 */
static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb);

	return 0;
}
590 
/*
 * Mask (disable interrupts for) the given doorbell bits.  The shadow
 * db_mask is updated under db_mask_lock and pushed to hardware
 * inverted: the idb_mask register holds the enabled bits.
 */
static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	unsigned long irqflags;
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (db_bits & ~sndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);

	sndev->db_mask |= db_bits << sndev->db_shift;
	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);

	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);

	return 0;
}
608 
/*
 * Unmask (re-enable interrupts for) the given doorbell bits; mirror of
 * switchtec_ntb_db_set_mask() under the same lock.
 */
static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
	unsigned long irqflags;
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (db_bits & ~sndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);

	sndev->db_mask &= ~(db_bits << sndev->db_shift);
	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);

	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);

	return 0;
}
626 
627 static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
628 {
629 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
630 
631 	return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask;
632 }
633 
634 static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
635 				      phys_addr_t *db_addr,
636 				      resource_size_t *db_size)
637 {
638 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
639 	unsigned long offset;
640 
641 	offset = (unsigned long)sndev->mmio_self_dbmsg->odb -
642 		(unsigned long)sndev->stdev->mmio;
643 
644 	offset += sndev->db_shift / 8;
645 
646 	if (db_addr)
647 		*db_addr = pci_resource_start(ntb->pdev, 0) + offset;
648 	if (db_size)
649 		*db_size = sizeof(u32);
650 
651 	return 0;
652 }
653 
/*
 * Ring the peer's doorbells by writing into the peer's half of the
 * shared outbound doorbell register.
 */
static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	iowrite64(db_bits << sndev->db_peer_shift,
		  &sndev->mmio_self_dbmsg->odb);

	return 0;
}
663 
664 static int switchtec_ntb_spad_count(struct ntb_dev *ntb)
665 {
666 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
667 
668 	return ARRAY_SIZE(sndev->self_shared->spad);
669 }
670 
671 static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx)
672 {
673 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
674 
675 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
676 		return 0;
677 
678 	if (!sndev->self_shared)
679 		return 0;
680 
681 	return sndev->self_shared->spad[idx];
682 }
683 
684 static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
685 {
686 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
687 
688 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
689 		return -EINVAL;
690 
691 	if (!sndev->self_shared)
692 		return -EIO;
693 
694 	sndev->self_shared->spad[idx] = val;
695 
696 	return 0;
697 }
698 
699 static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx,
700 					int sidx)
701 {
702 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
703 
704 	if (pidx != NTB_DEF_PEER_IDX)
705 		return -EINVAL;
706 
707 	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
708 		return 0;
709 
710 	if (!sndev->peer_shared)
711 		return 0;
712 
713 	return ioread32(&sndev->peer_shared->spad[sidx]);
714 }
715 
716 static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
717 					 int sidx, u32 val)
718 {
719 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
720 
721 	if (pidx != NTB_DEF_PEER_IDX)
722 		return -EINVAL;
723 
724 	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
725 		return -EINVAL;
726 
727 	if (!sndev->peer_shared)
728 		return -EIO;
729 
730 	iowrite32(val, &sndev->peer_shared->spad[sidx]);
731 
732 	return 0;
733 }
734 
735 static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx,
736 					int sidx, phys_addr_t *spad_addr)
737 {
738 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
739 	unsigned long offset;
740 
741 	if (pidx != NTB_DEF_PEER_IDX)
742 		return -EINVAL;
743 
744 	offset = (unsigned long)&sndev->peer_shared->spad[sidx] -
745 		(unsigned long)sndev->stdev->mmio;
746 
747 	if (spad_addr)
748 		*spad_addr = pci_resource_start(ntb->pdev, 0) + offset;
749 
750 	return 0;
751 }
752 
/* Dispatch table wiring this driver into the generic NTB core. */
static const struct ntb_dev_ops switchtec_ntb_ops = {
	.mw_count		= switchtec_ntb_mw_count,
	.mw_get_align		= switchtec_ntb_mw_get_align,
	.mw_set_trans		= switchtec_ntb_mw_set_trans,
	.peer_mw_count		= switchtec_ntb_peer_mw_count,
	.peer_mw_get_addr	= switchtec_ntb_peer_mw_get_addr,
	.link_is_up		= switchtec_ntb_link_is_up,
	.link_enable		= switchtec_ntb_link_enable,
	.link_disable		= switchtec_ntb_link_disable,
	.db_valid_mask		= switchtec_ntb_db_valid_mask,
	.db_vector_count	= switchtec_ntb_db_vector_count,
	.db_vector_mask		= switchtec_ntb_db_vector_mask,
	.db_read		= switchtec_ntb_db_read,
	.db_clear		= switchtec_ntb_db_clear,
	.db_set_mask		= switchtec_ntb_db_set_mask,
	.db_clear_mask		= switchtec_ntb_db_clear_mask,
	.db_read_mask		= switchtec_ntb_db_read_mask,
	.peer_db_addr		= switchtec_ntb_peer_db_addr,
	.peer_db_set		= switchtec_ntb_peer_db_set,
	.spad_count		= switchtec_ntb_spad_count,
	.spad_read		= switchtec_ntb_spad_read,
	.spad_write		= switchtec_ntb_spad_write,
	.peer_spad_read		= switchtec_ntb_peer_spad_read,
	.peer_spad_write	= switchtec_ntb_peer_spad_write,
	.peer_spad_addr		= switchtec_ntb_peer_spad_addr,
};
779 
780 static void switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
781 {
782 	u64 part_map;
783 
784 	sndev->ntb.pdev = sndev->stdev->pdev;
785 	sndev->ntb.topo = NTB_TOPO_SWITCH;
786 	sndev->ntb.ops = &switchtec_ntb_ops;
787 
788 	sndev->self_partition = sndev->stdev->partition;
789 
790 	sndev->mmio_ntb = sndev->stdev->mmio_ntb;
791 	part_map = ioread64(&sndev->mmio_ntb->ep_map);
792 	part_map &= ~(1 << sndev->self_partition);
793 	sndev->peer_partition = ffs(part_map) - 1;
794 
795 	dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d (%llx)",
796 		sndev->self_partition, sndev->stdev->partition_count,
797 		part_map);
798 
799 	sndev->mmio_ctrl = (void * __iomem)sndev->mmio_ntb +
800 		SWITCHTEC_NTB_REG_CTRL_OFFSET;
801 	sndev->mmio_dbmsg = (void * __iomem)sndev->mmio_ntb +
802 		SWITCHTEC_NTB_REG_DBMSG_OFFSET;
803 
804 	sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition];
805 	sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition];
806 	sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition];
807 }
808 
809 static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
810 {
811 	int i;
812 	int cnt = 0;
813 
814 	for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) {
815 		u32 r = ioread32(&ctrl->bar_entry[i].ctl);
816 
817 		if (r & NTB_CTRL_BAR_VALID)
818 			map[cnt++] = i;
819 	}
820 
821 	return cnt;
822 }
823 
/*
 * Discover how many direct (BAR) and LUT memory windows each side has.
 * LUT counts are rounded down to a power of two, matching how the
 * count is encoded in the hardware BAR control register later on.
 */
static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
{
	sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar,
				       sndev->mmio_self_ctrl);

	sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
	sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);

	dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut",
		sndev->nr_direct_mw, sndev->nr_lut_mw);

	sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
					    sndev->mmio_peer_ctrl);

	sndev->peer_nr_lut_mw =
		ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
	sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);

	dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut",
		sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);

}
846 
847 /*
848  * There are 64 doorbells in the switch hardware but this is
849  * shared among all partitions. So we must split them in half
850  * (32 for each partition). However, the message interrupts are
851  * also shared with the top 4 doorbells so we just limit this to
852  * 28 doorbells per partition
853  */
static void switchtec_ntb_init_db(struct switchtec_ntb *sndev)
{
	/* 28 usable doorbells per partition (see comment above) */
	sndev->db_valid_mask = 0x0FFFFFFF;

	/* The lower-numbered partition takes the low half of the register */
	if (sndev->self_partition < sndev->peer_partition) {
		sndev->db_shift = 0;
		sndev->db_peer_shift = 32;
	} else {
		sndev->db_shift = 32;
		sndev->db_peer_shift = 0;
	}

	/* Mask everything; idb_mask holds the *enabled* bits, hence ~ */
	sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;
	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
	/* Only allow ringing the peer's valid doorbell range outbound */
	iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
		  &sndev->mmio_self_dbmsg->odb_mask);
}
871 
/*
 * Configure the incoming message registers.  msg_map packs one byte
 * per register: the source message index in the low bits and the peer
 * partition it is accepted from shifted above it.
 */
static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
{
	int i;
	u32 msg_map = 0;

	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
		int m = i | sndev->peer_partition << 2;

		msg_map |= m << i * 8;
	}

	iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map);

	/*
	 * Writing STATUS|MASK to each imsg register presumably clears
	 * stale status (write-one-to-clear) and leaves the interrupt
	 * masked until the ISR is hooked up -- confirm against the
	 * register documentation.
	 */
	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++)
		iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK,
			  &sndev->mmio_self_dbmsg->imsg[i]);
}
889 
/*
 * Populate the requester-ID table so that DMA from the root complex
 * and the host bridge is allowed through the NTB.  The partition is
 * locked for the update and re-configured afterwards.
 *
 * Returns 0 on success, -EFAULT when the hardware table is too small,
 * or an error from switchtec_ntb_part_op().
 */
static int switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
{
	int rc = 0;
	u16 req_id;
	u32 error;

	req_id = ioread16(&sndev->mmio_ntb->requester_id);

	/* We need room for the two entries written below */
	if (ioread32(&sndev->mmio_self_ctrl->req_id_table_size) < 2) {
		dev_err(&sndev->stdev->dev,
			"Not enough requester IDs available.");
		return -EFAULT;
	}

	rc = switchtec_ntb_part_op(sndev, sndev->mmio_self_ctrl,
				   NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	/* Disable requester-ID protection while the table is updated */
	iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
		  &sndev->mmio_self_ctrl->partition_ctrl);

	/*
	 * Root Complex Requester ID (which is 0:00.0)
	 */
	iowrite32(0 << 16 | NTB_CTRL_REQ_ID_EN,
		  &sndev->mmio_self_ctrl->req_id_table[0]);

	/*
	 * Host Bridge Requester ID (as read from the mmap address)
	 */
	iowrite32(req_id << 16 | NTB_CTRL_REQ_ID_EN,
		  &sndev->mmio_self_ctrl->req_id_table[1]);

	rc = switchtec_ntb_part_op(sndev, sndev->mmio_self_ctrl,
				   NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);
	if (rc == -EIO) {
		error = ioread32(&sndev->mmio_self_ctrl->req_id_error);
		dev_err(&sndev->stdev->dev,
			"Error setting up the requester ID table: %08x",
			error);
	}

	return rc;
}
937 
/*
 * Initialize the contents of our shared page: magic, partition id, and
 * the sizes of every window we advertise to the peer.
 */
static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
{
	int i;

	memset(sndev->self_shared, 0, LUT_SIZE);
	sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC;
	sndev->self_shared->partition_id = sndev->stdev->partition;

	for (i = 0; i < sndev->nr_direct_mw; i++) {
		int bar = sndev->direct_mw_to_bar[i];
		resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar);

		/* Window 0 shares its BAR with the LUT region; cap it */
		if (i == 0)
			sz = min_t(resource_size_t, sz,
				   LUT_SIZE * sndev->nr_lut_mw);

		sndev->self_shared->mw_sizes[i] = sz;
	}

	/* LUT windows follow the direct windows and are fixed size */
	for (i = 0; i < sndev->nr_lut_mw; i++) {
		int idx = sndev->nr_direct_mw + i;

		sndev->self_shared->mw_sizes[idx] = LUT_SIZE;
	}
}
963 
/*
 * Allocate our shared page, expose it to the peer through LUT entry 0
 * of the peer partition's first direct BAR, and map the peer's shared
 * page back.  On any failure the coherent allocation is released.
 */
static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	int bar = sndev->direct_mw_to_bar[0];
	u32 ctl_val;
	int rc;

	sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev,
						 LUT_SIZE,
						 &sndev->self_shared_dma,
						 GFP_KERNEL);
	if (!sndev->self_shared) {
		dev_err(&sndev->stdev->dev,
			"unable to allocate memory for shared mw");
		return -ENOMEM;
	}

	switchtec_ntb_init_shared(sndev);

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		goto unalloc_and_exit;

	/*
	 * Enable LUT windows on the BAR.  NOTE(review): the field
	 * positions below appear to be: bits 13:8 = log2 of the LUT
	 * entry size, bits starting at 14 = number of entries minus
	 * one -- confirm against the register documentation.
	 */
	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
	ctl_val &= 0xFF;
	ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN;
	ctl_val |= ilog2(LUT_SIZE) << 8;
	ctl_val |= (sndev->nr_lut_mw - 1) << 14;
	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);

	/* Point LUT entry 0 at our shared page */
	iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) |
		   sndev->self_shared_dma),
		  &ctl->lut_entry[0]);

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);
	if (rc) {
		u32 bar_error, lut_error;

		bar_error = ioread32(&ctl->bar_error);
		lut_error = ioread32(&ctl->lut_error);
		dev_err(&sndev->stdev->dev,
			"Error setting up shared MW: %08x / %08x",
			bar_error, lut_error);
		goto unalloc_and_exit;
	}

	/* Map the peer's shared page, which sits at the start of the BAR */
	sndev->peer_shared = pci_iomap(sndev->stdev->pdev, bar, LUT_SIZE);
	if (!sndev->peer_shared) {
		rc = -ENOMEM;
		goto unalloc_and_exit;
	}

	dev_dbg(&sndev->stdev->dev, "Shared MW Ready");
	return 0;

unalloc_and_exit:
	dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
			  sndev->self_shared, sndev->self_shared_dma);

	return rc;
}
1027 
/*
 * Undo switchtec_ntb_init_shared_mw(): unmap the peer's shared page
 * and free our coherent allocation.  Safe to call with either pointer
 * unset.
 */
static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev)
{
	if (sndev->peer_shared)
		pci_iounmap(sndev->stdev->pdev, sndev->peer_shared);

	if (sndev->self_shared)
		dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
				  sndev->self_shared,
				  sndev->self_shared_dma);
}
1038 
1039 static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev)
1040 {
1041 	struct switchtec_ntb *sndev = dev;
1042 
1043 	dev_dbg(&sndev->stdev->dev, "doorbell\n");
1044 
1045 	ntb_db_event(&sndev->ntb, 0);
1046 
1047 	return IRQ_HANDLED;
1048 }
1049 
/*
 * Message interrupt handler: scan every incoming message register,
 * acknowledge each one with a pending status, and re-check the link
 * when a LINK_MESSAGE arrives (the message value itself is ignored).
 */
static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev)
{
	int i;
	struct switchtec_ntb *sndev = dev;

	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
		u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]);

		if (msg & NTB_DBMSG_IMSG_STATUS) {
			dev_dbg(&sndev->stdev->dev, "message: %d %08x\n", i,
				(u32)msg);
			/* acknowledge: clear the status bit for this register */
			iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status);

			if (i == LINK_MESSAGE)
				switchtec_ntb_check_link(sndev);
		}
	}

	return IRQ_HANDLED;
}
1070 
/*
 * Pick interrupt vectors for doorbells and messages (the lowest ones
 * not already used by the switchtec event interrupt), program the
 * idb_vec_map so the last four entries (the message bells) route to
 * the message vector, and hook up both handlers.
 *
 * Returns 0 on success or the error from request_irq(); the doorbell
 * IRQ is released again if the message IRQ cannot be acquired.
 */
static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev)
{
	int i;
	int rc;
	int doorbell_irq = 0;
	int message_irq = 0;
	int event_irq;
	int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map);

	event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number);

	/* Choose the lowest vectors that don't collide with event_irq */
	while (doorbell_irq == event_irq)
		doorbell_irq++;
	while (message_irq == doorbell_irq ||
	       message_irq == event_irq)
		message_irq++;

	dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d",
		event_irq, doorbell_irq, message_irq);

	/* All but the last four inbound bells are plain doorbells */
	for (i = 0; i < idb_vecs - 4; i++)
		iowrite8(doorbell_irq,
			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);

	/* The top four are shared with the message registers */
	for (; i < idb_vecs; i++)
		iowrite8(message_irq,
			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);

	sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq);
	sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq);

	rc = request_irq(sndev->doorbell_irq,
			 switchtec_ntb_doorbell_isr, 0,
			 "switchtec_ntb_doorbell", sndev);
	if (rc)
		return rc;

	rc = request_irq(sndev->message_irq,
			 switchtec_ntb_message_isr, 0,
			 "switchtec_ntb_message", sndev);
	if (rc) {
		free_irq(sndev->doorbell_irq, sndev);
		return rc;
	}

	return 0;
}
1118 
/* Release both interrupt handlers acquired in init_db_msg_irq(). */
static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev)
{
	free_irq(sndev->doorbell_irq, sndev);
	free_irq(sndev->message_irq, sndev);
}
1124 
/*
 * Class-interface add hook: probe one switchtec device as an NTB
 * device.  Initialization proceeds in dependency order and unwinds
 * via the goto chain below on failure.
 *
 * Returns 0 on success, -ENODEV for non-NTB-class devices, -ENOMEM on
 * allocation failure, or the first initialization error.
 */
static int switchtec_ntb_add(struct device *dev,
			     struct class_interface *class_intf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	struct switchtec_ntb *sndev;
	int rc;

	stdev->sndev = NULL;

	/* Only switches configured with the NTB class code apply */
	if (stdev->pdev->class != MICROSEMI_NTB_CLASSCODE)
		return -ENODEV;

	if (stdev->partition_count != 2)
		dev_warn(dev, "ntb driver only supports 2 partitions");

	sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
	if (!sndev)
		return -ENOMEM;

	sndev->stdev = stdev;
	switchtec_ntb_init_sndev(sndev);
	switchtec_ntb_init_mw(sndev);
	switchtec_ntb_init_db(sndev);
	switchtec_ntb_init_msgs(sndev);

	rc = switchtec_ntb_init_req_id_table(sndev);
	if (rc)
		goto free_and_exit;

	rc = switchtec_ntb_init_shared_mw(sndev);
	if (rc)
		goto free_and_exit;

	rc = switchtec_ntb_init_db_msg_irq(sndev);
	if (rc)
		goto deinit_shared_and_exit;

	rc = ntb_register_device(&sndev->ntb);
	if (rc)
		goto deinit_and_exit;

	/* Publish only after everything is in place */
	stdev->sndev = sndev;
	stdev->link_notifier = switchtec_ntb_link_notification;
	dev_info(dev, "NTB device registered");

	return 0;

deinit_and_exit:
	switchtec_ntb_deinit_db_msg_irq(sndev);
deinit_shared_and_exit:
	switchtec_ntb_deinit_shared_mw(sndev);
free_and_exit:
	kfree(sndev);
	dev_err(dev, "failed to register ntb device: %d", rc);
	return rc;
}
1181 
1182 void switchtec_ntb_remove(struct device *dev,
1183 			  struct class_interface *class_intf)
1184 {
1185 	struct switchtec_dev *stdev = to_stdev(dev);
1186 	struct switchtec_ntb *sndev = stdev->sndev;
1187 
1188 	if (!sndev)
1189 		return;
1190 
1191 	stdev->link_notifier = NULL;
1192 	stdev->sndev = NULL;
1193 	ntb_unregister_device(&sndev->ntb);
1194 	switchtec_ntb_deinit_db_msg_irq(sndev);
1195 	switchtec_ntb_deinit_shared_mw(sndev);
1196 	kfree(sndev);
1197 	dev_info(dev, "ntb device unregistered");
1198 }
1199 
/* Hooks this driver onto every device in the switchtec class. */
static struct class_interface switchtec_interface  = {
	.add_dev = switchtec_ntb_add,
	.remove_dev = switchtec_ntb_remove,
};
1204 
/* Module init: attach the class interface to the switchtec class. */
static int __init switchtec_ntb_init(void)
{
	switchtec_interface.class = switchtec_class;
	return class_interface_register(&switchtec_interface);
}
module_init(switchtec_ntb_init);
1211 
/* Module exit: detach from the switchtec class (removes all devices). */
static void __exit switchtec_ntb_exit(void)
{
	class_interface_unregister(&switchtec_interface);
}
module_exit(switchtec_ntb_exit);
1217