1 /*
2  * Microsemi Switchtec(tm) PCIe Management Driver
3  * Copyright (c) 2017, Microsemi Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  */
15 
16 #include <linux/switchtec.h>
17 #include <linux/module.h>
18 #include <linux/delay.h>
19 #include <linux/kthread.h>
20 #include <linux/interrupt.h>
21 #include <linux/ntb.h>
22 
23 MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
24 MODULE_VERSION("0.1");
25 MODULE_LICENSE("GPL");
26 MODULE_AUTHOR("Microsemi Corporation");
27 
28 static ulong max_mw_size = SZ_2M;
29 module_param(max_mw_size, ulong, 0644);
30 MODULE_PARM_DESC(max_mw_size,
31 	"Max memory window size reported to the upper layer");
32 
33 static bool use_lut_mws;
34 module_param(use_lut_mws, bool, 0644);
35 MODULE_PARM_DESC(use_lut_mws,
36 		 "Enable the use of the LUT based memory windows");
37 
/*
 * Fallback for architectures without a native ioread64: prefer readq
 * when available, otherwise compose the value from two 32-bit reads.
 * Note the two-read fallback is not atomic with respect to the device.
 */
#ifndef ioread64
#ifdef readq
#define ioread64 readq
#else
#define ioread64 _ioread64
static inline u64 _ioread64(void __iomem *mmio)
{
	u64 low, high;

	/* low dword first, then high dword at mmio + 4 */
	low = ioread32(mmio);
	high = ioread32(mmio + sizeof(u32));
	return low | (high << 32);
}
#endif
#endif
53 
/*
 * Fallback for architectures without a native iowrite64: prefer writeq
 * when available, otherwise split into two 32-bit writes (low dword
 * first). Not atomic with respect to the device.
 */
#ifndef iowrite64
#ifdef writeq
#define iowrite64 writeq
#else
#define iowrite64 _iowrite64
static inline void _iowrite64(u64 val, void __iomem *mmio)
{
	iowrite32(val, mmio);
	iowrite32(val >> 32, mmio + sizeof(u32));
}
#endif
#endif
66 
67 #define SWITCHTEC_NTB_MAGIC 0x45CC0001
68 #define MAX_MWS     128
69 
/*
 * Shared-memory block exposed to the peer through a reserved LUT
 * window. The layout must be identical on both hosts. A 64-bit read
 * of 'magic' also fetches 'link_sta' in the upper dword (see
 * switchtec_ntb_check_link()).
 */
struct shared_mw {
	u32 magic;		/* SWITCHTEC_NTB_MAGIC when the peer is initialized */
	u32 link_sta;		/* non-zero when this side's link is enabled */
	u32 partition_id;	/* writer's partition number */
	u64 mw_sizes[MAX_MWS];	/* advertised memory-window sizes; 0 = unused */
	u32 spad[128];		/* scratchpad registers emulated in memory */
};
77 
78 #define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry)
79 #define LUT_SIZE SZ_64K
80 
/* Per-device state for one Switchtec NTB instance. */
struct switchtec_ntb {
	struct ntb_dev ntb;		/* embedded NTB core device */
	struct switchtec_dev *stdev;	/* underlying switchtec management device */

	int self_partition;		/* our partition number */
	int peer_partition;		/* NT peer partition number */

	int doorbell_irq;
	int message_irq;

	/* raw register regions; 'self'/'peer' below index into these */
	struct ntb_info_regs __iomem *mmio_ntb;
	struct ntb_ctrl_regs __iomem *mmio_ctrl;
	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
	struct ntb_ctrl_regs __iomem *mmio_self_ctrl;
	struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
	struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg;
	struct ntb_dbmsg_regs __iomem *mmio_peer_dbmsg;

	/* mapping of the peer's dbmsg regs in crosslink configuration */
	void __iomem *mmio_xlink_win;

	struct shared_mw *self_shared;		/* our shared block (DMA memory) */
	struct shared_mw __iomem *peer_shared;	/* peer's shared block (via LUT) */
	dma_addr_t self_shared_dma;

	u64 db_mask;		/* cached doorbell mask (shifted) */
	u64 db_valid_mask;	/* doorbell bits owned by this instance */
	int db_shift;		/* our offset into the shared idb register */
	int db_peer_shift;	/* peer's offset into the odb register */

	/* synchronize rmw access of db_mask and hw reg */
	spinlock_t db_mask_lock;

	int nr_direct_mw;	/* direct BAR windows we expose */
	int nr_lut_mw;		/* LUT windows we expose */
	int nr_rsvd_luts;	/* LUTs reserved for internal use (shared_mw, crosslink) */
	int direct_mw_to_bar[MAX_DIRECT_MW];

	int peer_nr_direct_mw;
	int peer_nr_lut_mw;
	int peer_direct_mw_to_bar[MAX_DIRECT_MW];

	bool link_is_up;
	enum ntb_speed link_speed;	/* min of self and peer link speed */
	enum ntb_width link_width;	/* min of self and peer link width */
	struct work_struct link_reinit_work;	/* re-init peer after forced down */
};
127 
/* Convert an ntb_dev handle back to the enclosing switchtec_ntb. */
static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
{
	return container_of(ntb, struct switchtec_ntb, ntb);
}
132 
/*
 * Issue a partition operation (lock/configure/reset) and poll the
 * partition status register until the transient state for that op
 * clears, up to ~50 seconds (1000 x 50 ms).
 *
 * Returns 0 when the final status equals @wait_status, -ETIMEDOUT if
 * still in the transient state, -EINTR if interrupted (a reset is
 * issued to back out), and -EIO on any other status.
 */
static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
				 struct ntb_ctrl_regs __iomem *ctl,
				 u32 op, int wait_status)
{
	static const char * const op_text[] = {
		[NTB_CTRL_PART_OP_LOCK] = "lock",
		[NTB_CTRL_PART_OP_CFG] = "configure",
		[NTB_CTRL_PART_OP_RESET] = "reset",
	};

	int i;
	u32 ps;
	int status;

	/* map the op to its transient "in progress" status value */
	switch (op) {
	case NTB_CTRL_PART_OP_LOCK:
		status = NTB_CTRL_PART_STATUS_LOCKING;
		break;
	case NTB_CTRL_PART_OP_CFG:
		status = NTB_CTRL_PART_STATUS_CONFIGURING;
		break;
	case NTB_CTRL_PART_OP_RESET:
		status = NTB_CTRL_PART_STATUS_RESETTING;
		break;
	default:
		return -EINVAL;
	}

	iowrite32(op, &ctl->partition_op);

	for (i = 0; i < 1000; i++) {
		if (msleep_interruptible(50) != 0) {
			/* interrupted: abort the pending op via reset */
			iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op);
			return -EINTR;
		}

		/* status lives in the low 16 bits */
		ps = ioread32(&ctl->partition_status) & 0xFFFF;

		if (ps != status)
			break;
	}

	if (ps == wait_status)
		return 0;

	if (ps == status) {
		dev_err(&sndev->stdev->dev,
			"Timed out while performing %s (%d). (%08x)\n",
			op_text[op], op,
			ioread32(&ctl->partition_status));

		return -ETIMEDOUT;
	}

	return -EIO;
}
189 
190 static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx,
191 				  u32 val)
192 {
193 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_peer_dbmsg->omsg))
194 		return -EINVAL;
195 
196 	iowrite32(val, &sndev->mmio_peer_dbmsg->omsg[idx].msg);
197 
198 	return 0;
199 }
200 
201 static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
202 {
203 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
204 	int nr_direct_mw = sndev->peer_nr_direct_mw;
205 	int nr_lut_mw = sndev->peer_nr_lut_mw - sndev->nr_rsvd_luts;
206 
207 	if (pidx != NTB_DEF_PEER_IDX)
208 		return -EINVAL;
209 
210 	if (!use_lut_mws)
211 		nr_lut_mw = 0;
212 
213 	return nr_direct_mw + nr_lut_mw;
214 }
215 
/* Map a public MW index to our own LUT table index (skips reserved LUTs). */
static int lut_index(struct switchtec_ntb *sndev, int mw_idx)
{
	return mw_idx - sndev->nr_direct_mw + sndev->nr_rsvd_luts;
}
220 
/* Map a public MW index to the peer's LUT table index (skips reserved LUTs). */
static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx)
{
	return mw_idx - sndev->peer_nr_direct_mw + sndev->nr_rsvd_luts;
}
225 
226 static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx,
227 				      int widx, resource_size_t *addr_align,
228 				      resource_size_t *size_align,
229 				      resource_size_t *size_max)
230 {
231 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
232 	int lut;
233 	resource_size_t size;
234 
235 	if (pidx != NTB_DEF_PEER_IDX)
236 		return -EINVAL;
237 
238 	lut = widx >= sndev->peer_nr_direct_mw;
239 	size = ioread64(&sndev->peer_shared->mw_sizes[widx]);
240 
241 	if (size == 0)
242 		return -EINVAL;
243 
244 	if (addr_align)
245 		*addr_align = lut ? size : SZ_4K;
246 
247 	if (size_align)
248 		*size_align = lut ? size : SZ_4K;
249 
250 	if (size_max)
251 		*size_max = size;
252 
253 	return 0;
254 }
255 
/*
 * Tear down a direct memory window in the peer's control registers:
 * clear the window-enable bit, zero the window size, and point the
 * translation back at our own partition with no address.
 */
static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	int bar = sndev->peer_direct_mw_to_bar[idx];
	u32 ctl_val;

	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
	ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
	iowrite32(0, &ctl->bar_entry[bar].win_size);
	/* xlate_addr low bits carry the destination partition number */
	iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
}
268 
269 static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx)
270 {
271 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
272 
273 	iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]);
274 }
275 
/*
 * Program a direct memory window in the peer's control registers so
 * peer accesses to BAR @idx translate to @addr in our partition.
 * win_size packs ilog2(size) in the low bits alongside the size, and
 * xlate_addr packs the destination partition in the low bits of the
 * bus address — the caller guarantees size/addr alignment (>= 4K) so
 * the fields do not overlap.
 */
static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
					dma_addr_t addr, resource_size_t size)
{
	int xlate_pos = ilog2(size);
	int bar = sndev->peer_direct_mw_to_bar[idx];
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	u32 ctl_val;

	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
	ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;

	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
	iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
	iowrite64(sndev->self_partition | addr,
		  &ctl->bar_entry[bar].xlate_addr);
}
292 
/*
 * Program a peer-side LUT entry so the corresponding LUT window
 * translates to @addr in our partition. The entry packs the enable
 * bit, the destination partition (bits 1..) and the bus address.
 */
static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx,
				     dma_addr_t addr, resource_size_t size)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;

	iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr),
		  &ctl->lut_entry[peer_lut_index(sndev, idx)]);
}
301 
/*
 * Set (or clear, when addr/size is 0) the translation for memory
 * window @widx. The hardware requires the partition to be locked
 * before window registers may change, then a configure op to commit;
 * on a commit error the window is rolled back and a second configure
 * is issued to return the partition to the normal state.
 */
static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
				      dma_addr_t addr, resource_size_t size)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	int xlate_pos = ilog2(size);
	int nr_direct_mw = sndev->peer_nr_direct_mw;
	int rc;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap\n",
		widx, pidx, &addr, &size);

	if (widx >= switchtec_ntb_mw_count(ntb, pidx))
		return -EINVAL;

	/* hardware minimum window is 4K (2^12) */
	if (xlate_pos < 12)
		return -EINVAL;

	if (!IS_ALIGNED(addr, BIT_ULL(xlate_pos))) {
		/*
		 * In certain circumstances we can get a buffer that is
		 * not aligned to its size. (Most of the time
		 * dma_alloc_coherent ensures this). This can happen when
		 * using large buffers allocated by the CMA
		 * (see CMA_CONFIG_ALIGNMENT)
		 */
		dev_err(&sndev->stdev->dev,
			"ERROR: Memory window address is not aligned to it's size!\n");
		return -EINVAL;
	}

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	/* zero addr/size means tear the window down */
	if (addr == 0 || size == 0) {
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_clr_direct(sndev, widx);
		else
			switchtec_ntb_mw_clr_lut(sndev, widx);
	} else {
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_set_direct(sndev, widx, addr, size);
		else
			switchtec_ntb_mw_set_lut(sndev, widx, addr, size);
	}

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);

	if (rc == -EIO) {
		dev_err(&sndev->stdev->dev,
			"Hardware reported an error configuring mw %d: %08x\n",
			widx, ioread32(&ctl->bar_error));

		/* roll the window back and re-commit to leave a sane state */
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_clr_direct(sndev, widx);
		else
			switchtec_ntb_mw_clr_lut(sndev, widx);

		switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				      NTB_CTRL_PART_STATUS_NORMAL);
	}

	return rc;
}
372 
373 static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb)
374 {
375 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
376 	int nr_lut_mw = sndev->nr_lut_mw - sndev->nr_rsvd_luts;
377 
378 	return sndev->nr_direct_mw + (use_lut_mws ? nr_lut_mw : 0);
379 }
380 
/*
 * Return the physical base and usable size of direct memory window
 * @idx. Window 0 shares its BAR with the LUT windows, so its base is
 * pushed past the LUT region and its size clamped accordingly; all
 * windows are further clamped to the max_mw_size module parameter.
 */
static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev,
					 int idx, phys_addr_t *base,
					 resource_size_t *size)
{
	int bar = sndev->direct_mw_to_bar[idx];
	size_t offset = 0;

	if (bar < 0)
		return -EINVAL;

	if (idx == 0) {
		/*
		 * This is the direct BAR shared with the LUTs
		 * which means the actual window will be offset
		 * by the size of all the LUT entries.
		 */

		offset = LUT_SIZE * sndev->nr_lut_mw;
	}

	if (base)
		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;

	if (size) {
		*size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
		/* NOTE(review): clamps the shared BAR's window to the LUT
		 * region size — presumably to split the BAR evenly; confirm */
		if (offset && *size > offset)
			*size = offset;

		if (*size > max_mw_size)
			*size = max_mw_size;
	}

	return 0;
}
415 
416 static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev,
417 				      int idx, phys_addr_t *base,
418 				      resource_size_t *size)
419 {
420 	int bar = sndev->direct_mw_to_bar[0];
421 	int offset;
422 
423 	offset = LUT_SIZE * lut_index(sndev, idx);
424 
425 	if (base)
426 		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
427 
428 	if (size)
429 		*size = LUT_SIZE;
430 
431 	return 0;
432 }
433 
434 static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
435 					  phys_addr_t *base,
436 					  resource_size_t *size)
437 {
438 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
439 
440 	if (idx < sndev->nr_direct_mw)
441 		return switchtec_ntb_direct_get_addr(sndev, idx, base, size);
442 	else if (idx < switchtec_ntb_peer_mw_count(ntb))
443 		return switchtec_ntb_lut_get_addr(sndev, idx, base, size);
444 	else
445 		return -EINVAL;
446 }
447 
/*
 * Read the PCIe link speed/width for @partition's vEP port from its
 * PFF config space. pci_cap_region[13] is the dword holding the PCIe
 * Link Status register in its upper half: speed in bits [19:16],
 * width in bits [25:20] — NOTE(review): offsets inferred from the
 * shifts below; confirm against the Switchtec register map.
 */
static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
					  int partition,
					  enum ntb_speed *speed,
					  enum ntb_width *width)
{
	struct switchtec_dev *stdev = sndev->stdev;

	u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id);
	u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);

	if (speed)
		*speed = (linksta >> 16) & 0xF;

	if (width)
		*width = (linksta >> 20) & 0x3F;
}
464 
465 static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev)
466 {
467 	enum ntb_speed self_speed, peer_speed;
468 	enum ntb_width self_width, peer_width;
469 
470 	if (!sndev->link_is_up) {
471 		sndev->link_speed = NTB_SPEED_NONE;
472 		sndev->link_width = NTB_WIDTH_NONE;
473 		return;
474 	}
475 
476 	switchtec_ntb_part_link_speed(sndev, sndev->self_partition,
477 				      &self_speed, &self_width);
478 	switchtec_ntb_part_link_speed(sndev, sndev->peer_partition,
479 				      &peer_speed, &peer_width);
480 
481 	sndev->link_speed = min(self_speed, peer_speed);
482 	sndev->link_width = min(self_width, peer_width);
483 }
484 
/* Non-zero when the peer partition's NT port is in crosslink mode. */
static int crosslink_is_enabled(struct switchtec_ntb *sndev)
{
	struct ntb_info_regs __iomem *inf = sndev->mmio_ntb;

	return ioread8(&inf->ntp_info[sndev->peer_partition].xlink_enabled);
}
491 
/*
 * Program the peer's doorbell/message registers for crosslink mode.
 * msg_map holds one byte per incoming message slot; each byte routes
 * slot i from our partition (slot index in bits [1:0], partition in
 * bits [7:2]). The odb mask limits peer doorbells to the bits we own.
 * No-op outside crosslink configuration.
 */
static void crosslink_init_dbmsgs(struct switchtec_ntb *sndev)
{
	int i;
	u32 msg_map = 0;

	if (!crosslink_is_enabled(sndev))
		return;

	for (i = 0; i < ARRAY_SIZE(sndev->mmio_peer_dbmsg->imsg); i++) {
		int m = i | sndev->self_partition << 2;

		msg_map |= m << i * 8;
	}

	iowrite32(msg_map, &sndev->mmio_peer_dbmsg->msg_map);
	iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
		  &sndev->mmio_peer_dbmsg->odb_mask);
}
510 
/* Values exchanged over message register LINK_MESSAGE for link management. */
enum switchtec_msg {
	LINK_MESSAGE = 0,	/* index of the message register used for link events */
	MSG_LINK_UP = 1,
	MSG_LINK_DOWN = 2,
	MSG_CHECK_LINK = 3,
	MSG_LINK_FORCE_DOWN = 4,
};
518 
519 static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev);
520 
521 static void link_reinit_work(struct work_struct *work)
522 {
523 	struct switchtec_ntb *sndev;
524 
525 	sndev = container_of(work, struct switchtec_ntb, link_reinit_work);
526 
527 	switchtec_ntb_reinit_peer(sndev);
528 }
529 
/*
 * Re-evaluate the logical link state and notify the NTB core of any
 * change. MSG_LINK_FORCE_DOWN immediately drops the link and queues a
 * peer re-init. Otherwise the link is up only when both our own
 * link_sta is set and the peer's shared block carries a valid magic
 * with a non-zero link_sta in its upper dword.
 */
static void switchtec_ntb_check_link(struct switchtec_ntb *sndev,
				     enum switchtec_msg msg)
{
	int link_sta;
	int old = sndev->link_is_up;

	if (msg == MSG_LINK_FORCE_DOWN) {
		schedule_work(&sndev->link_reinit_work);

		if (sndev->link_is_up) {
			sndev->link_is_up = 0;
			ntb_link_event(&sndev->ntb);
			dev_info(&sndev->stdev->dev, "ntb link forced down\n");
		}

		return;
	}

	link_sta = sndev->self_shared->link_sta;
	if (link_sta) {
		/* 64-bit read fetches magic (low) and peer link_sta (high) */
		u64 peer = ioread64(&sndev->peer_shared->magic);

		if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC)
			link_sta = peer >> 32;
		else
			link_sta = 0;
	}

	sndev->link_is_up = link_sta;
	switchtec_ntb_set_link_speed(sndev);

	if (link_sta != old) {
		/* prod the peer so it re-evaluates its side too */
		switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK);
		ntb_link_event(&sndev->ntb);
		dev_info(&sndev->stdev->dev, "ntb link %s\n",
			 link_sta ? "up" : "down");

		if (link_sta)
			crosslink_init_dbmsgs(sndev);
	}
}
571 
/* Callback from the switchtec core when the hardware link state changes. */
static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
{
	struct switchtec_ntb *sndev = stdev->sndev;

	switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);
}
578 
579 static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb,
580 				    enum ntb_speed *speed,
581 				    enum ntb_width *width)
582 {
583 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
584 
585 	if (speed)
586 		*speed = sndev->link_speed;
587 	if (width)
588 		*width = sndev->link_width;
589 
590 	return sndev->link_is_up;
591 }
592 
/*
 * Enable our side of the link: publish link_sta in the shared block,
 * tell the peer, then re-check the combined state. max_speed/max_width
 * hints are ignored by this hardware.
 */
static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
				     enum ntb_speed max_speed,
				     enum ntb_width max_width)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	dev_dbg(&sndev->stdev->dev, "enabling link\n");

	sndev->self_shared->link_sta = 1;
	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);

	switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);

	return 0;
}
608 
/*
 * Disable our side of the link: clear link_sta in the shared block,
 * tell the peer, then re-check the combined state.
 */
static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	dev_dbg(&sndev->stdev->dev, "disabling link\n");

	sndev->self_shared->link_sta = 0;
	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_DOWN);

	switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);

	return 0;
}
622 
/* Return the set of doorbell bits this instance owns. */
static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	return sndev->db_valid_mask;
}
629 
/* All doorbells share a single interrupt vector on this hardware. */
static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb)
{
	return 1;
}
634 
635 static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
636 {
637 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
638 
639 	if (db_vector < 0 || db_vector > 1)
640 		return 0;
641 
642 	return sndev->db_valid_mask;
643 }
644 
645 static u64 switchtec_ntb_db_read(struct ntb_dev *ntb)
646 {
647 	u64 ret;
648 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
649 
650 	ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift;
651 
652 	return ret & sndev->db_valid_mask;
653 }
654 
/* Acknowledge doorbells: writing a bit to idb clears it (write-1-to-clear). */
static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb);

	return 0;
}
663 
/*
 * Mask (disable interrupts for) the given doorbell bits. The cached
 * db_mask and the hardware register are updated together under
 * db_mask_lock; the register holds the *enabled* bits, hence ~mask.
 */
static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	unsigned long irqflags;
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (db_bits & ~sndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);

	sndev->db_mask |= db_bits << sndev->db_shift;
	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);

	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);

	return 0;
}
681 
/*
 * Unmask (re-enable interrupts for) the given doorbell bits; mirror
 * of switchtec_ntb_db_set_mask() under the same lock.
 */
static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
	unsigned long irqflags;
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (db_bits & ~sndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);

	sndev->db_mask &= ~(db_bits << sndev->db_shift);
	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);

	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);

	return 0;
}
699 
/* Return the cached doorbell mask, shifted back into caller view. */
static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask;
}
706 
707 static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
708 				      phys_addr_t *db_addr,
709 				      resource_size_t *db_size)
710 {
711 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
712 	unsigned long offset;
713 
714 	offset = (unsigned long)sndev->mmio_peer_dbmsg->odb -
715 		(unsigned long)sndev->stdev->mmio;
716 
717 	offset += sndev->db_shift / 8;
718 
719 	if (db_addr)
720 		*db_addr = pci_resource_start(ntb->pdev, 0) + offset;
721 	if (db_size)
722 		*db_size = sizeof(u32);
723 
724 	return 0;
725 }
726 
/* Ring the peer's doorbells by writing into its slice of the odb register. */
static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	iowrite64(db_bits << sndev->db_peer_shift,
		  &sndev->mmio_peer_dbmsg->odb);

	return 0;
}
736 
/* Number of scratchpads (emulated in the shared-memory block). */
static int switchtec_ntb_spad_count(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	return ARRAY_SIZE(sndev->self_shared->spad);
}
743 
744 static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx)
745 {
746 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
747 
748 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
749 		return 0;
750 
751 	if (!sndev->self_shared)
752 		return 0;
753 
754 	return sndev->self_shared->spad[idx];
755 }
756 
757 static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
758 {
759 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
760 
761 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
762 		return -EINVAL;
763 
764 	if (!sndev->self_shared)
765 		return -EIO;
766 
767 	sndev->self_shared->spad[idx] = val;
768 
769 	return 0;
770 }
771 
772 static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx,
773 					int sidx)
774 {
775 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
776 
777 	if (pidx != NTB_DEF_PEER_IDX)
778 		return -EINVAL;
779 
780 	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
781 		return 0;
782 
783 	if (!sndev->peer_shared)
784 		return 0;
785 
786 	return ioread32(&sndev->peer_shared->spad[sidx]);
787 }
788 
789 static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
790 					 int sidx, u32 val)
791 {
792 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
793 
794 	if (pidx != NTB_DEF_PEER_IDX)
795 		return -EINVAL;
796 
797 	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
798 		return -EINVAL;
799 
800 	if (!sndev->peer_shared)
801 		return -EIO;
802 
803 	iowrite32(val, &sndev->peer_shared->spad[sidx]);
804 
805 	return 0;
806 }
807 
808 static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx,
809 					int sidx, phys_addr_t *spad_addr)
810 {
811 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
812 	unsigned long offset;
813 
814 	if (pidx != NTB_DEF_PEER_IDX)
815 		return -EINVAL;
816 
817 	offset = (unsigned long)&sndev->peer_shared->spad[sidx] -
818 		(unsigned long)sndev->stdev->mmio;
819 
820 	if (spad_addr)
821 		*spad_addr = pci_resource_start(ntb->pdev, 0) + offset;
822 
823 	return 0;
824 }
825 
/* NTB core callbacks implemented by this driver. */
static const struct ntb_dev_ops switchtec_ntb_ops = {
	.mw_count		= switchtec_ntb_mw_count,
	.mw_get_align		= switchtec_ntb_mw_get_align,
	.mw_set_trans		= switchtec_ntb_mw_set_trans,
	.peer_mw_count		= switchtec_ntb_peer_mw_count,
	.peer_mw_get_addr	= switchtec_ntb_peer_mw_get_addr,
	.link_is_up		= switchtec_ntb_link_is_up,
	.link_enable		= switchtec_ntb_link_enable,
	.link_disable		= switchtec_ntb_link_disable,
	.db_valid_mask		= switchtec_ntb_db_valid_mask,
	.db_vector_count	= switchtec_ntb_db_vector_count,
	.db_vector_mask		= switchtec_ntb_db_vector_mask,
	.db_read		= switchtec_ntb_db_read,
	.db_clear		= switchtec_ntb_db_clear,
	.db_set_mask		= switchtec_ntb_db_set_mask,
	.db_clear_mask		= switchtec_ntb_db_clear_mask,
	.db_read_mask		= switchtec_ntb_db_read_mask,
	.peer_db_addr		= switchtec_ntb_peer_db_addr,
	.peer_db_set		= switchtec_ntb_peer_db_set,
	.spad_count		= switchtec_ntb_spad_count,
	.spad_read		= switchtec_ntb_spad_read,
	.spad_write		= switchtec_ntb_spad_write,
	.peer_spad_read		= switchtec_ntb_peer_spad_read,
	.peer_spad_write	= switchtec_ntb_peer_spad_write,
	.peer_spad_addr		= switchtec_ntb_peer_spad_addr,
};
852 
853 static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
854 {
855 	u64 tpart_vec;
856 	int self;
857 	u64 part_map;
858 	int bit;
859 
860 	sndev->ntb.pdev = sndev->stdev->pdev;
861 	sndev->ntb.topo = NTB_TOPO_SWITCH;
862 	sndev->ntb.ops = &switchtec_ntb_ops;
863 
864 	INIT_WORK(&sndev->link_reinit_work, link_reinit_work);
865 
866 	sndev->self_partition = sndev->stdev->partition;
867 
868 	sndev->mmio_ntb = sndev->stdev->mmio_ntb;
869 
870 	self = sndev->self_partition;
871 	tpart_vec = ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_high);
872 	tpart_vec <<= 32;
873 	tpart_vec |= ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_low);
874 
875 	part_map = ioread64(&sndev->mmio_ntb->ep_map);
876 	part_map &= ~(1 << sndev->self_partition);
877 
878 	if (!ffs(tpart_vec)) {
879 		if (sndev->stdev->partition_count != 2) {
880 			dev_err(&sndev->stdev->dev,
881 				"ntb target partition not defined\n");
882 			return -ENODEV;
883 		}
884 
885 		bit = ffs(part_map);
886 		if (!bit) {
887 			dev_err(&sndev->stdev->dev,
888 				"peer partition is not NT partition\n");
889 			return -ENODEV;
890 		}
891 
892 		sndev->peer_partition = bit - 1;
893 	} else {
894 		if (ffs(tpart_vec) != fls(tpart_vec)) {
895 			dev_err(&sndev->stdev->dev,
896 				"ntb driver only supports 1 pair of 1-1 ntb mapping\n");
897 			return -ENODEV;
898 		}
899 
900 		sndev->peer_partition = ffs(tpart_vec) - 1;
901 		if (!(part_map & (1 << sndev->peer_partition))) {
902 			dev_err(&sndev->stdev->dev,
903 				"ntb target partition is not NT partition\n");
904 			return -ENODEV;
905 		}
906 	}
907 
908 	dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d\n",
909 		sndev->self_partition, sndev->stdev->partition_count);
910 
911 	sndev->mmio_ctrl = (void * __iomem)sndev->mmio_ntb +
912 		SWITCHTEC_NTB_REG_CTRL_OFFSET;
913 	sndev->mmio_dbmsg = (void * __iomem)sndev->mmio_ntb +
914 		SWITCHTEC_NTB_REG_DBMSG_OFFSET;
915 
916 	sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition];
917 	sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition];
918 	sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition];
919 	sndev->mmio_peer_dbmsg = sndev->mmio_self_dbmsg;
920 
921 	return 0;
922 }
923 
/*
 * Configure a reserved LUT window: enable LUT mode on the shared BAR
 * (window size and LUT count encoded into the ctl register) and point
 * @lut_idx at @addr in @partition. Uses the usual lock/program/
 * configure partition sequence; on failure the bar/lut error
 * registers are logged.
 */
static int config_rsvd_lut_win(struct switchtec_ntb *sndev,
			       struct ntb_ctrl_regs __iomem *ctl,
			       int lut_idx, int partition, u64 addr)
{
	int peer_bar = sndev->peer_direct_mw_to_bar[0];
	u32 ctl_val;
	int rc;

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	/* keep low byte, set LUT enable, LUT size (bits 8..) and count (bits 14..) */
	ctl_val = ioread32(&ctl->bar_entry[peer_bar].ctl);
	ctl_val &= 0xFF;
	ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN;
	ctl_val |= ilog2(LUT_SIZE) << 8;
	ctl_val |= (sndev->nr_lut_mw - 1) << 14;
	iowrite32(ctl_val, &ctl->bar_entry[peer_bar].ctl);

	iowrite64((NTB_CTRL_LUT_EN | (partition << 1) | addr),
		  &ctl->lut_entry[lut_idx]);

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);
	if (rc) {
		u32 bar_error, lut_error;

		bar_error = ioread32(&ctl->bar_error);
		lut_error = ioread32(&ctl->lut_error);
		dev_err(&sndev->stdev->dev,
			"Error setting up reserved lut window: %08x / %08x\n",
			bar_error, lut_error);
		return rc;
	}

	return 0;
}
962 
963 static int config_req_id_table(struct switchtec_ntb *sndev,
964 			       struct ntb_ctrl_regs __iomem *mmio_ctrl,
965 			       int *req_ids, int count)
966 {
967 	int i, rc = 0;
968 	u32 error;
969 	u32 proxy_id;
970 
971 	if (ioread32(&mmio_ctrl->req_id_table_size) < count) {
972 		dev_err(&sndev->stdev->dev,
973 			"Not enough requester IDs available.\n");
974 		return -EFAULT;
975 	}
976 
977 	rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
978 				   NTB_CTRL_PART_OP_LOCK,
979 				   NTB_CTRL_PART_STATUS_LOCKED);
980 	if (rc)
981 		return rc;
982 
983 	iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
984 		  &mmio_ctrl->partition_ctrl);
985 
986 	for (i = 0; i < count; i++) {
987 		iowrite32(req_ids[i] << 16 | NTB_CTRL_REQ_ID_EN,
988 			  &mmio_ctrl->req_id_table[i]);
989 
990 		proxy_id = ioread32(&mmio_ctrl->req_id_table[i]);
991 		dev_dbg(&sndev->stdev->dev,
992 			"Requester ID %02X:%02X.%X -> BB:%02X.%X\n",
993 			req_ids[i] >> 8, (req_ids[i] >> 3) & 0x1F,
994 			req_ids[i] & 0x7, (proxy_id >> 4) & 0x1F,
995 			(proxy_id >> 1) & 0x7);
996 	}
997 
998 	rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
999 				   NTB_CTRL_PART_OP_CFG,
1000 				   NTB_CTRL_PART_STATUS_NORMAL);
1001 
1002 	if (rc == -EIO) {
1003 		error = ioread32(&mmio_ctrl->req_id_error);
1004 		dev_err(&sndev->stdev->dev,
1005 			"Error setting up the requester ID table: %08x\n",
1006 			error);
1007 	}
1008 
1009 	return 0;
1010 }
1011 
/*
 * In crosslink mode, program our own control registers so that our
 * LUT and direct windows translate into the virtual crosslink
 * partition: each non-reserved LUT maps a LUT_SIZE slice of the first
 * crosslink BAR, and each direct window maps one of the remaining
 * crosslink BARs (window 0 offset past the LUT region, as elsewhere).
 */
static int crosslink_setup_mws(struct switchtec_ntb *sndev, int ntb_lut_idx,
			       u64 *mw_addrs, int mw_count)
{
	int rc, i;
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_self_ctrl;
	u64 addr;
	size_t size, offset;
	int bar;
	int xlate_pos;
	u32 ctl_val;

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	for (i = 0; i < sndev->nr_lut_mw; i++) {
		/* skip the LUT already reserved for the peer dbmsg window */
		if (i == ntb_lut_idx)
			continue;

		addr = mw_addrs[0] + LUT_SIZE * i;

		iowrite64((NTB_CTRL_LUT_EN | (sndev->peer_partition << 1) |
			   addr),
			  &ctl->lut_entry[i]);
	}

	/* can't expose more direct windows than crosslink BARs supplied */
	sndev->nr_direct_mw = min_t(int, sndev->nr_direct_mw, mw_count);

	for (i = 0; i < sndev->nr_direct_mw; i++) {
		bar = sndev->direct_mw_to_bar[i];
		offset = (i == 0) ? LUT_SIZE * sndev->nr_lut_mw : 0;
		addr = mw_addrs[i] + offset;
		size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
		xlate_pos = ilog2(size);

		if (offset && size > offset)
			size = offset;

		ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
		ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;

		iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
		iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
		iowrite64(sndev->peer_partition | addr,
			  &ctl->bar_entry[bar].xlate_addr);
	}

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);
	if (rc) {
		u32 bar_error, lut_error;

		bar_error = ioread32(&ctl->bar_error);
		lut_error = ioread32(&ctl->lut_error);
		dev_err(&sndev->stdev->dev,
			"Error setting up cross link windows: %08x / %08x\n",
			bar_error, lut_error);
		return rc;
	}

	return 0;
}
1075 
1076 static int crosslink_setup_req_ids(struct switchtec_ntb *sndev,
1077 	struct ntb_ctrl_regs __iomem *mmio_ctrl)
1078 {
1079 	int req_ids[16];
1080 	int i;
1081 	u32 proxy_id;
1082 
1083 	for (i = 0; i < ARRAY_SIZE(req_ids); i++) {
1084 		proxy_id = ioread32(&sndev->mmio_self_ctrl->req_id_table[i]);
1085 
1086 		if (!(proxy_id & NTB_CTRL_REQ_ID_EN))
1087 			break;
1088 
1089 		req_ids[i] = ((proxy_id >> 1) & 0xFF);
1090 	}
1091 
1092 	return config_req_id_table(sndev, mmio_ctrl, req_ids, i);
1093 }
1094 
1095 /*
1096  * In crosslink configuration there is a virtual partition in the
1097  * middle of the two switches. The BARs in this partition have to be
1098  * enumerated and assigned addresses.
1099  */
static int crosslink_enum_partition(struct switchtec_ntb *sndev,
				    u64 *bar_addrs)
{
	struct part_cfg_regs __iomem *part_cfg =
		&sndev->stdev->mmio_part_cfg_all[sndev->peer_partition];
	u32 pff = ioread32(&part_cfg->vep_pff_inst_id);
	struct pff_csr_regs __iomem *mmio_pff =
		&sndev->stdev->mmio_pff_csr[pff];
	const u64 bar_space = 0x1000000000LL;	/* 64G spacing per BAR */
	u64 bar_addr;
	int bar_cnt = 0;
	int i;

	/* PCI COMMAND: enable memory decode and bus mastering (0x6) */
	iowrite16(0x6, &mmio_pff->pcicmd);

	for (i = 0; i < ARRAY_SIZE(mmio_pff->pci_bar64); i++) {
		/* assign an address and read it back; low 4 bits are flags */
		iowrite64(bar_space * i, &mmio_pff->pci_bar64[i]);
		bar_addr = ioread64(&mmio_pff->pci_bar64[i]);
		bar_addr &= ~0xf;

		dev_dbg(&sndev->stdev->dev,
			"Crosslink BAR%d addr: %llx\n",
			i, bar_addr);

		/* a BAR that didn't latch the address isn't implemented */
		if (bar_addr != bar_space * i)
			continue;

		bar_addrs[bar_cnt++] = bar_addr;
	}

	return bar_cnt;
}
1132 
/*
 * Set up the crosslink topology: enumerate the virtual middle
 * partition, map a LUT window onto the peer's doorbell/message
 * registers, configure the memory windows and requester IDs, and
 * map the crosslink window into our address space.
 *
 * No-op (returns 0) when crosslink is not enabled.
 */
static int switchtec_ntb_init_crosslink(struct switchtec_ntb *sndev)
{
	int rc;
	int bar = sndev->direct_mw_to_bar[0];
	/* LUT 0 is the shared-MW entry; use entry 1 for the crosslink */
	const int ntb_lut_idx = 1;
	u64 bar_addrs[6];
	u64 addr;
	int offset;
	int bar_cnt;

	if (!crosslink_is_enabled(sndev))
		return 0;

	dev_info(&sndev->stdev->dev, "Using crosslink configuration\n");
	sndev->ntb.topo = NTB_TOPO_CROSSLINK;

	/* Need one BAR for the peer dbmsg window plus one per direct MW */
	bar_cnt = crosslink_enum_partition(sndev, bar_addrs);
	if (bar_cnt < sndev->nr_direct_mw + 1) {
		dev_err(&sndev->stdev->dev,
			"Error enumerating crosslink partition\n");
		return -EINVAL;
	}

	/* GAS address of the peer partition's dbmsg register block */
	addr = (bar_addrs[0] + SWITCHTEC_GAS_NTB_OFFSET +
		SWITCHTEC_NTB_REG_DBMSG_OFFSET +
		sizeof(struct ntb_dbmsg_regs) * sndev->peer_partition);

	/* LUT windows must be LUT_SIZE aligned; keep the sub-window offset */
	offset = addr & (LUT_SIZE - 1);
	addr -= offset;

	rc = config_rsvd_lut_win(sndev, sndev->mmio_self_ctrl, ntb_lut_idx,
				 sndev->peer_partition, addr);
	if (rc)
		return rc;

	/* bar_addrs[0] carries the dbmsg window; the rest are MWs */
	rc = crosslink_setup_mws(sndev, ntb_lut_idx, &bar_addrs[1],
				 bar_cnt - 1);
	if (rc)
		return rc;

	rc = crosslink_setup_req_ids(sndev, sndev->mmio_peer_ctrl);
	if (rc)
		return rc;

	/* Map the second LUT slot of our BAR (the crosslink LUT entry) */
	sndev->mmio_xlink_win = pci_iomap_range(sndev->stdev->pdev, bar,
						LUT_SIZE, LUT_SIZE);
	if (!sndev->mmio_xlink_win) {
		rc = -ENOMEM;
		return rc;
	}

	sndev->mmio_peer_dbmsg = sndev->mmio_xlink_win + offset;
	/* Account for the LUT entry consumed by the crosslink window */
	sndev->nr_rsvd_luts++;

	crosslink_init_dbmsgs(sndev);

	return 0;
}
1191 
1192 static void switchtec_ntb_deinit_crosslink(struct switchtec_ntb *sndev)
1193 {
1194 	if (sndev->mmio_xlink_win)
1195 		pci_iounmap(sndev->stdev->pdev, sndev->mmio_xlink_win);
1196 }
1197 
1198 static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
1199 {
1200 	int i;
1201 	int cnt = 0;
1202 
1203 	for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) {
1204 		u32 r = ioread32(&ctrl->bar_entry[i].ctl);
1205 
1206 		if (r & NTB_CTRL_BAR_VALID)
1207 			map[cnt++] = i;
1208 	}
1209 
1210 	return cnt;
1211 }
1212 
/*
 * Discover the direct and LUT memory windows available on both the
 * local and peer sides of the NTB.
 */
static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
{
	sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar,
				       sndev->mmio_self_ctrl);

	/* LUT count is rounded down to a power of two per hw requirement */
	sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
	sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);

	dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut\n",
		sndev->nr_direct_mw, sndev->nr_lut_mw);

	sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
					    sndev->mmio_peer_ctrl);

	sndev->peer_nr_lut_mw =
		ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
	sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);

	dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut\n",
		sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);

}
1235 
1236 /*
1237  * There are 64 doorbells in the switch hardware but this is
1238  * shared among all partitions. So we must split them in half
1239  * (32 for each partition). However, the message interrupts are
1240  * also shared with the top 4 doorbells so we just limit this to
1241  * 28 doorbells per partition.
1242  *
 * In crosslink mode, each side has its own dbmsg register so
1244  * they can each use all 60 of the available doorbells.
1245  */
static void switchtec_ntb_init_db(struct switchtec_ntb *sndev)
{
	/* 60 usable doorbells (top 4 are shared with message interrupts) */
	sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;

	if (sndev->mmio_peer_dbmsg != sndev->mmio_self_dbmsg) {
		/* Crosslink: each side has its own dbmsg block, use all 60 */
		sndev->db_shift = 0;
		sndev->db_peer_shift = 0;
		sndev->db_valid_mask = sndev->db_mask;
	} else if (sndev->self_partition < sndev->peer_partition) {
		/* Shared dbmsg block: lower partition takes the low half */
		sndev->db_shift = 0;
		sndev->db_peer_shift = 32;
		sndev->db_valid_mask = 0x0FFFFFFF;
	} else {
		/* Higher partition takes the upper half */
		sndev->db_shift = 32;
		sndev->db_peer_shift = 0;
		sndev->db_valid_mask = 0x0FFFFFFF;
	}

	/* Mask off only the doorbells outside db_mask (the top 4) */
	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
	iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
		  &sndev->mmio_peer_dbmsg->odb_mask);

	dev_dbg(&sndev->stdev->dev, "dbs: shift %d/%d, mask %016llx\n",
		sndev->db_shift, sndev->db_peer_shift, sndev->db_valid_mask);
}
1271 
1272 static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
1273 {
1274 	int i;
1275 	u32 msg_map = 0;
1276 
1277 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
1278 		int m = i | sndev->peer_partition << 2;
1279 
1280 		msg_map |= m << i * 8;
1281 	}
1282 
1283 	iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map);
1284 
1285 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++)
1286 		iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK,
1287 			  &sndev->mmio_self_dbmsg->imsg[i]);
1288 }
1289 
1290 static int
1291 switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
1292 {
1293 	int req_ids[2];
1294 
1295 	/*
1296 	 * Root Complex Requester ID (which is 0:00.0)
1297 	 */
1298 	req_ids[0] = 0;
1299 
1300 	/*
1301 	 * Host Bridge Requester ID (as read from the mmap address)
1302 	 */
1303 	req_ids[1] = ioread16(&sndev->mmio_ntb->requester_id);
1304 
1305 	return config_req_id_table(sndev, sndev->mmio_self_ctrl, req_ids,
1306 				   ARRAY_SIZE(req_ids));
1307 }
1308 
1309 static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
1310 {
1311 	int i;
1312 
1313 	memset(sndev->self_shared, 0, LUT_SIZE);
1314 	sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC;
1315 	sndev->self_shared->partition_id = sndev->stdev->partition;
1316 
1317 	for (i = 0; i < sndev->nr_direct_mw; i++) {
1318 		int bar = sndev->direct_mw_to_bar[i];
1319 		resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar);
1320 
1321 		if (i == 0)
1322 			sz = min_t(resource_size_t, sz,
1323 				   LUT_SIZE * sndev->nr_lut_mw);
1324 
1325 		sndev->self_shared->mw_sizes[i] = sz;
1326 	}
1327 
1328 	for (i = 0; i < sndev->nr_lut_mw; i++) {
1329 		int idx = sndev->nr_direct_mw + i;
1330 
1331 		sndev->self_shared->mw_sizes[idx] = LUT_SIZE;
1332 	}
1333 }
1334 
1335 static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
1336 {
1337 	int self_bar = sndev->direct_mw_to_bar[0];
1338 	int rc;
1339 
1340 	sndev->nr_rsvd_luts++;
1341 	sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev,
1342 						 LUT_SIZE,
1343 						 &sndev->self_shared_dma,
1344 						 GFP_KERNEL);
1345 	if (!sndev->self_shared) {
1346 		dev_err(&sndev->stdev->dev,
1347 			"unable to allocate memory for shared mw\n");
1348 		return -ENOMEM;
1349 	}
1350 
1351 	switchtec_ntb_init_shared(sndev);
1352 
1353 	rc = config_rsvd_lut_win(sndev, sndev->mmio_peer_ctrl, 0,
1354 				 sndev->self_partition,
1355 				 sndev->self_shared_dma);
1356 	if (rc)
1357 		goto unalloc_and_exit;
1358 
1359 	sndev->peer_shared = pci_iomap(sndev->stdev->pdev, self_bar, LUT_SIZE);
1360 	if (!sndev->peer_shared) {
1361 		rc = -ENOMEM;
1362 		goto unalloc_and_exit;
1363 	}
1364 
1365 	dev_dbg(&sndev->stdev->dev, "Shared MW Ready\n");
1366 	return 0;
1367 
1368 unalloc_and_exit:
1369 	dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
1370 			  sndev->self_shared, sndev->self_shared_dma);
1371 
1372 	return rc;
1373 }
1374 
1375 static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev)
1376 {
1377 	if (sndev->peer_shared)
1378 		pci_iounmap(sndev->stdev->pdev, sndev->peer_shared);
1379 
1380 	if (sndev->self_shared)
1381 		dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
1382 				  sndev->self_shared,
1383 				  sndev->self_shared_dma);
1384 	sndev->nr_rsvd_luts--;
1385 }
1386 
1387 static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev)
1388 {
1389 	struct switchtec_ntb *sndev = dev;
1390 
1391 	dev_dbg(&sndev->stdev->dev, "doorbell\n");
1392 
1393 	ntb_db_event(&sndev->ntb, 0);
1394 
1395 	return IRQ_HANDLED;
1396 }
1397 
1398 static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev)
1399 {
1400 	int i;
1401 	struct switchtec_ntb *sndev = dev;
1402 
1403 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
1404 		u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]);
1405 
1406 		if (msg & NTB_DBMSG_IMSG_STATUS) {
1407 			dev_dbg(&sndev->stdev->dev, "message: %d %08x\n",
1408 				i, (u32)msg);
1409 			iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status);
1410 
1411 			if (i == LINK_MESSAGE)
1412 				switchtec_ntb_check_link(sndev, msg);
1413 		}
1414 	}
1415 
1416 	return IRQ_HANDLED;
1417 }
1418 
/*
 * Choose MSI vectors for doorbell and message interrupts (distinct
 * from each other and from the event vector already claimed by the
 * hardware), program the in-doorbell vector map accordingly, and
 * request both IRQs.
 */
static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev)
{
	int i;
	int rc;
	int doorbell_irq = 0;
	int message_irq = 0;
	int event_irq;
	int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map);

	event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number);

	/* Pick the lowest vector numbers that don't collide */
	while (doorbell_irq == event_irq)
		doorbell_irq++;
	while (message_irq == doorbell_irq ||
	       message_irq == event_irq)
		message_irq++;

	dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d\n",
		event_irq, doorbell_irq, message_irq);

	/* Route all but the last 4 idb entries to the doorbell vector... */
	for (i = 0; i < idb_vecs - 4; i++)
		iowrite8(doorbell_irq,
			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);

	/* ...and the last 4 (shared with messages) to the message vector */
	for (; i < idb_vecs; i++)
		iowrite8(message_irq,
			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);

	sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq);
	sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq);

	rc = request_irq(sndev->doorbell_irq,
			 switchtec_ntb_doorbell_isr, 0,
			 "switchtec_ntb_doorbell", sndev);
	if (rc)
		return rc;

	rc = request_irq(sndev->message_irq,
			 switchtec_ntb_message_isr, 0,
			 "switchtec_ntb_message", sndev);
	if (rc) {
		/* Don't leak the doorbell IRQ on failure */
		free_irq(sndev->doorbell_irq, sndev);
		return rc;
	}

	return 0;
}
1466 
/* Release both IRQs acquired by switchtec_ntb_init_db_msg_irq(). */
static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev)
{
	free_irq(sndev->doorbell_irq, sndev);
	free_irq(sndev->message_irq, sndev);
}
1472 
/*
 * Called when the peer has come back up: tear down and rebuild the
 * shared memory window against the peer's (possibly new) MW layout.
 */
static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev)
{
	dev_info(&sndev->stdev->dev, "peer reinitialized\n");
	switchtec_ntb_deinit_shared_mw(sndev);
	switchtec_ntb_init_mw(sndev);
	return switchtec_ntb_init_shared_mw(sndev);
}
1480 
/*
 * Class-interface add callback: create and register an NTB device
 * for a newly appeared switchtec device with the NTB class code.
 * Returns 0 on success, -ENODEV for non-NTB devices, or a negative
 * errno on initialization failure.
 */
static int switchtec_ntb_add(struct device *dev,
			     struct class_interface *class_intf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	struct switchtec_ntb *sndev;
	int rc;

	stdev->sndev = NULL;

	/* Only bind to switchtec devices configured with NTB firmware */
	if (stdev->pdev->class != MICROSEMI_NTB_CLASSCODE)
		return -ENODEV;

	sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
	if (!sndev)
		return -ENOMEM;

	sndev->stdev = stdev;
	rc = switchtec_ntb_init_sndev(sndev);
	if (rc)
		goto free_and_exit;

	switchtec_ntb_init_mw(sndev);

	rc = switchtec_ntb_init_req_id_table(sndev);
	if (rc)
		goto free_and_exit;

	rc = switchtec_ntb_init_crosslink(sndev);
	if (rc)
		goto free_and_exit;

	switchtec_ntb_init_db(sndev);
	switchtec_ntb_init_msgs(sndev);

	rc = switchtec_ntb_init_shared_mw(sndev);
	if (rc)
		goto deinit_crosslink;

	rc = switchtec_ntb_init_db_msg_irq(sndev);
	if (rc)
		goto deinit_shared_and_exit;

	/*
	 * If this host crashed, the other host may think the link is
	 * still up. Tell them to force it down (it will go back up
	 * once we register the ntb device).
	 */
	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_FORCE_DOWN);

	rc = ntb_register_device(&sndev->ntb);
	if (rc)
		goto deinit_and_exit;

	stdev->sndev = sndev;
	stdev->link_notifier = switchtec_ntb_link_notification;
	dev_info(dev, "NTB device registered\n");

	return 0;

	/* Unwind in reverse order of initialization */
deinit_and_exit:
	switchtec_ntb_deinit_db_msg_irq(sndev);
deinit_shared_and_exit:
	switchtec_ntb_deinit_shared_mw(sndev);
deinit_crosslink:
	switchtec_ntb_deinit_crosslink(sndev);
free_and_exit:
	kfree(sndev);
	dev_err(dev, "failed to register ntb device: %d\n", rc);
	return rc;
}
1551 
/*
 * Class-interface remove callback: unregister the NTB device and
 * release all resources, in reverse order of switchtec_ntb_add().
 */
static void switchtec_ntb_remove(struct device *dev,
				 struct class_interface *class_intf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	struct switchtec_ntb *sndev = stdev->sndev;

	/* Nothing to do if add() never bound an NTB device here */
	if (!sndev)
		return;

	stdev->link_notifier = NULL;
	stdev->sndev = NULL;
	ntb_unregister_device(&sndev->ntb);
	switchtec_ntb_deinit_db_msg_irq(sndev);
	switchtec_ntb_deinit_shared_mw(sndev);
	switchtec_ntb_deinit_crosslink(sndev);
	kfree(sndev);
	dev_info(dev, "ntb device unregistered\n");
}
1570 
1571 static struct class_interface switchtec_interface  = {
1572 	.add_dev = switchtec_ntb_add,
1573 	.remove_dev = switchtec_ntb_remove,
1574 };
1575 
/* Module init: attach our class interface to the switchtec class. */
static int __init switchtec_ntb_init(void)
{
	switchtec_interface.class = switchtec_class;
	return class_interface_register(&switchtec_interface);
}
module_init(switchtec_ntb_init);
1582 
/* Module exit: detach the class interface (removes all NTB devices). */
static void __exit switchtec_ntb_exit(void)
{
	class_interface_unregister(&switchtec_interface);
}
module_exit(switchtec_ntb_exit);
1588