1 /*
2  * Microsemi Switchtec(tm) PCIe Management Driver
3  * Copyright (c) 2017, Microsemi Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  */
15 
16 #include <linux/interrupt.h>
17 #include <linux/io-64-nonatomic-lo-hi.h>
18 #include <linux/delay.h>
19 #include <linux/kthread.h>
20 #include <linux/module.h>
21 #include <linux/ntb.h>
22 #include <linux/pci.h>
23 #include <linux/switchtec.h>
24 
25 MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
26 MODULE_VERSION("0.1");
27 MODULE_LICENSE("GPL");
28 MODULE_AUTHOR("Microsemi Corporation");
29 
30 static ulong max_mw_size = SZ_2M;
31 module_param(max_mw_size, ulong, 0644);
32 MODULE_PARM_DESC(max_mw_size,
33 	"Max memory window size reported to the upper layer");
34 
35 static bool use_lut_mws;
36 module_param(use_lut_mws, bool, 0644);
37 MODULE_PARM_DESC(use_lut_mws,
38 		 "Enable the use of the LUT based memory windows");
39 
40 #define SWITCHTEC_NTB_MAGIC 0x45CC0001
41 #define MAX_MWS     128
42 
/*
 * Layout of the shared memory region exchanged between the two hosts.
 * The local copy lives in DMA-coherent memory (self_shared); the
 * peer's copy is accessed through a reserved LUT window (peer_shared).
 */
struct shared_mw {
	u32 magic;		/* SWITCHTEC_NTB_MAGIC once initialized */
	u32 link_sta;		/* nonzero when this side's link is enabled */
	u32 partition_id;	/* partition that owns this copy */
	u64 mw_sizes[MAX_MWS];	/* sizes of the memory windows exposed */
	u32 spad[128];		/* emulated scratchpad registers */
};
50 
51 #define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry)
52 #define LUT_SIZE SZ_64K
53 
/* Per-device state for one Switchtec NTB instance. */
struct switchtec_ntb {
	struct ntb_dev ntb;		/* embedded NTB core device */
	struct switchtec_dev *stdev;	/* underlying switchtec management dev */

	int self_partition;		/* our NT partition ID */
	int peer_partition;		/* peer's NT partition ID */

	int doorbell_irq;
	int message_irq;

	/* Register block pointers carved out of the switchtec BAR. */
	struct ntb_info_regs __iomem *mmio_ntb;
	struct ntb_ctrl_regs __iomem *mmio_ctrl;
	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
	struct ntb_ctrl_regs __iomem *mmio_self_ctrl;
	struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
	struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg;
	struct ntb_dbmsg_regs __iomem *mmio_peer_dbmsg;

	/* Mapping of the crosslink window (crosslink topology only). */
	void __iomem *mmio_xlink_win;

	struct shared_mw *self_shared;		/* our shared page (DMA mem) */
	struct shared_mw __iomem *peer_shared;	/* peer's shared page (MMIO) */
	dma_addr_t self_shared_dma;		/* DMA address of self_shared */

	u64 db_mask;		/* shadow of the doorbell mask register */
	u64 db_valid_mask;	/* doorbell bits usable by clients */
	int db_shift;		/* our offset into the 64 hw doorbells */
	int db_peer_shift;	/* peer's offset into the hw doorbells */

	/* synchronize rmw access of db_mask and hw reg */
	spinlock_t db_mask_lock;

	int nr_direct_mw;	/* local direct windows available */
	int nr_lut_mw;		/* local LUT windows available */
	int nr_rsvd_luts;	/* LUTs reserved for internal use */
	int direct_mw_to_bar[MAX_DIRECT_MW];

	int peer_nr_direct_mw;
	int peer_nr_lut_mw;
	int peer_direct_mw_to_bar[MAX_DIRECT_MW];

	bool link_is_up;
	enum ntb_speed link_speed;	/* min of both sides' speeds */
	enum ntb_width link_width;	/* min of both sides' widths */
	struct work_struct link_reinit_work;
};
100 
101 static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
102 {
103 	return container_of(ntb, struct switchtec_ntb, ntb);
104 }
105 
/*
 * Execute a partition operation (lock/configure/reset) on the given
 * control register block and poll until the hardware leaves the
 * corresponding transient state.
 *
 * Returns 0 when the final status equals wait_status, -ETIMEDOUT if
 * the operation is still pending after the poll loop, -EINTR if the
 * sleeping poll is interrupted (the pending op is aborted with a
 * reset in that case), -EINVAL for an unknown op and -EIO for any
 * other unexpected final status.
 */
static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
				 struct ntb_ctrl_regs __iomem *ctl,
				 u32 op, int wait_status)
{
	static const char * const op_text[] = {
		[NTB_CTRL_PART_OP_LOCK] = "lock",
		[NTB_CTRL_PART_OP_CFG] = "configure",
		[NTB_CTRL_PART_OP_RESET] = "reset",
	};

	int i;
	u32 ps;
	int status;

	/* Map the requested op to the in-progress status it produces. */
	switch (op) {
	case NTB_CTRL_PART_OP_LOCK:
		status = NTB_CTRL_PART_STATUS_LOCKING;
		break;
	case NTB_CTRL_PART_OP_CFG:
		status = NTB_CTRL_PART_STATUS_CONFIGURING;
		break;
	case NTB_CTRL_PART_OP_RESET:
		status = NTB_CTRL_PART_STATUS_RESETTING;
		break;
	default:
		return -EINVAL;
	}

	iowrite32(op, &ctl->partition_op);

	/* Poll up to 1000 * 50ms for the transient status to clear. */
	for (i = 0; i < 1000; i++) {
		if (msleep_interruptible(50) != 0) {
			/* Interrupted: abort the pending op via a reset. */
			iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op);
			return -EINTR;
		}

		/* The status code is in the low 16 bits of the register. */
		ps = ioread32(&ctl->partition_status) & 0xFFFF;

		if (ps != status)
			break;
	}

	if (ps == wait_status)
		return 0;

	if (ps == status) {
		dev_err(&sndev->stdev->dev,
			"Timed out while performing %s (%d). (%08x)\n",
			op_text[op], op,
			ioread32(&ctl->partition_status));

		return -ETIMEDOUT;
	}

	return -EIO;
}
162 
163 static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx,
164 				  u32 val)
165 {
166 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_peer_dbmsg->omsg))
167 		return -EINVAL;
168 
169 	iowrite32(val, &sndev->mmio_peer_dbmsg->omsg[idx].msg);
170 
171 	return 0;
172 }
173 
174 static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
175 {
176 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
177 	int nr_direct_mw = sndev->peer_nr_direct_mw;
178 	int nr_lut_mw = sndev->peer_nr_lut_mw - sndev->nr_rsvd_luts;
179 
180 	if (pidx != NTB_DEF_PEER_IDX)
181 		return -EINVAL;
182 
183 	if (!use_lut_mws)
184 		nr_lut_mw = 0;
185 
186 	return nr_direct_mw + nr_lut_mw;
187 }
188 
189 static int lut_index(struct switchtec_ntb *sndev, int mw_idx)
190 {
191 	return mw_idx - sndev->nr_direct_mw + sndev->nr_rsvd_luts;
192 }
193 
194 static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx)
195 {
196 	return mw_idx - sndev->peer_nr_direct_mw + sndev->nr_rsvd_luts;
197 }
198 
/*
 * Report the alignment constraints and maximum size of peer memory
 * window widx, read from the size table the peer publishes in its
 * shared memory. Direct windows need 4K alignment; LUT windows must
 * be aligned to (and are exactly) the published window size.
 */
static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx,
				      int widx, resource_size_t *addr_align,
				      resource_size_t *size_align,
				      resource_size_t *size_max)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	int lut;
	resource_size_t size;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	/* Windows past the direct ones are LUT based. */
	lut = widx >= sndev->peer_nr_direct_mw;
	size = ioread64(&sndev->peer_shared->mw_sizes[widx]);

	/* A zero entry means the peer has not published this window. */
	if (size == 0)
		return -EINVAL;

	if (addr_align)
		*addr_align = lut ? size : SZ_4K;

	if (size_align)
		*size_align = lut ? size : SZ_4K;

	if (size_max)
		*size_max = size;

	return 0;
}
228 
/*
 * Tear down a peer direct memory window: clear the window-enable bit,
 * zero both halves of the window size, and reset the translation
 * address back to just our partition ID tag.
 */
static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	int bar = sndev->peer_direct_mw_to_bar[idx];
	u32 ctl_val;

	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
	ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
	iowrite32(0, &ctl->bar_entry[bar].win_size);
	iowrite32(0, &ctl->bar_ext_entry[bar].win_size);
	iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
}
242 
243 static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx)
244 {
245 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
246 
247 	iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]);
248 }
249 
/*
 * Program a peer direct memory window to translate to local DMA
 * address 'addr'. The size is split across the base win_size register
 * (log2 size in the low 12 bits, low 32 size bits above) and the
 * extended entry (upper 32 bits). The translation address is tagged
 * with our partition ID in its low bits.
 */
static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
					dma_addr_t addr, resource_size_t size)
{
	int xlate_pos = ilog2(size);
	int bar = sndev->peer_direct_mw_to_bar[idx];
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	u32 ctl_val;

	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
	ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;

	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
	iowrite32(xlate_pos | (lower_32_bits(size) & 0xFFFFF000),
		  &ctl->bar_entry[bar].win_size);
	iowrite32(upper_32_bits(size), &ctl->bar_ext_entry[bar].win_size);
	iowrite64(sndev->self_partition | addr,
		  &ctl->bar_entry[bar].xlate_addr);
}
268 
269 static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx,
270 				     dma_addr_t addr, resource_size_t size)
271 {
272 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
273 
274 	iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr),
275 		  &ctl->lut_entry[peer_lut_index(sndev, idx)]);
276 }
277 
/*
 * Set up (or tear down, when addr/size are zero) the translation for
 * peer memory window widx so it targets local DMA memory at 'addr'.
 * The peer partition's registers are locked for the update and then
 * committed; on a hardware error the window is cleared again before
 * returning the error.
 */
static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
				      dma_addr_t addr, resource_size_t size)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	int xlate_pos = ilog2(size);
	int nr_direct_mw = sndev->peer_nr_direct_mw;
	int rc;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap\n",
		widx, pidx, &addr, &size);

	if (widx >= switchtec_ntb_mw_count(ntb, pidx))
		return -EINVAL;

	/* The hardware needs at least 4K (2^12) translation granularity. */
	if (xlate_pos < 12)
		return -EINVAL;

	if (!IS_ALIGNED(addr, BIT_ULL(xlate_pos))) {
		/*
		 * In certain circumstances we can get a buffer that is
		 * not aligned to its size. (Most of the time
		 * dma_alloc_coherent ensures this). This can happen when
		 * using large buffers allocated by the CMA
		 * (see CMA_CONFIG_ALIGNMENT)
		 */
		dev_err(&sndev->stdev->dev,
			"ERROR: Memory window address is not aligned to it's size!\n");
		return -EINVAL;
	}

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	/* A zero address or size tears the window down. */
	if (addr == 0 || size == 0) {
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_clr_direct(sndev, widx);
		else
			switchtec_ntb_mw_clr_lut(sndev, widx);
	} else {
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_set_direct(sndev, widx, addr, size);
		else
			switchtec_ntb_mw_set_lut(sndev, widx, addr, size);
	}

	/* Commit the configuration and return to normal operation. */
	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);

	if (rc == -EIO) {
		dev_err(&sndev->stdev->dev,
			"Hardware reported an error configuring mw %d: %08x\n",
			widx, ioread32(&ctl->bar_error));

		/* Undo the partial setup and re-commit a clean state. */
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_clr_direct(sndev, widx);
		else
			switchtec_ntb_mw_clr_lut(sndev, widx);

		switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				      NTB_CTRL_PART_STATUS_NORMAL);
	}

	return rc;
}
348 
349 static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb)
350 {
351 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
352 	int nr_lut_mw = sndev->nr_lut_mw - sndev->nr_rsvd_luts;
353 
354 	return sndev->nr_direct_mw + (use_lut_mws ? nr_lut_mw : 0);
355 }
356 
/*
 * Return the physical address and usable size of local direct memory
 * window idx. Window 0 shares its BAR with the LUT entries, so its
 * usable region starts after the LUT area and is capped so it does
 * not overlap it. Sizes are further limited by the max_mw_size module
 * parameter.
 */
static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev,
					 int idx, phys_addr_t *base,
					 resource_size_t *size)
{
	int bar = sndev->direct_mw_to_bar[idx];
	size_t offset = 0;

	if (bar < 0)
		return -EINVAL;

	if (idx == 0) {
		/*
		 * This is the direct BAR shared with the LUTs
		 * which means the actual window will be offset
		 * by the size of all the LUT entries.
		 */

		offset = LUT_SIZE * sndev->nr_lut_mw;
	}

	if (base)
		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;

	if (size) {
		*size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
		/* Cap the window so it does not run back into the LUT area. */
		if (offset && *size > offset)
			*size = offset;

		if (*size > max_mw_size)
			*size = max_mw_size;
	}

	return 0;
}
391 
392 static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev,
393 				      int idx, phys_addr_t *base,
394 				      resource_size_t *size)
395 {
396 	int bar = sndev->direct_mw_to_bar[0];
397 	int offset;
398 
399 	offset = LUT_SIZE * lut_index(sndev, idx);
400 
401 	if (base)
402 		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
403 
404 	if (size)
405 		*size = LUT_SIZE;
406 
407 	return 0;
408 }
409 
410 static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
411 					  phys_addr_t *base,
412 					  resource_size_t *size)
413 {
414 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
415 
416 	if (idx < sndev->nr_direct_mw)
417 		return switchtec_ntb_direct_get_addr(sndev, idx, base, size);
418 	else if (idx < switchtec_ntb_peer_mw_count(ntb))
419 		return switchtec_ntb_lut_get_addr(sndev, idx, base, size);
420 	else
421 		return -EINVAL;
422 }
423 
/*
 * Read the negotiated PCIe link speed and width for the given
 * partition's virtual EP from its PFF capability space.
 *
 * pci_cap_region[13] appears to hold the Link Control/Status dword:
 * speed is extracted from bits 19:16 and width from bits 25:20
 * (NOTE(review): field positions inferred from the shifts below —
 * confirm against the Switchtec register documentation).
 */
static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
					  int partition,
					  enum ntb_speed *speed,
					  enum ntb_width *width)
{
	struct switchtec_dev *stdev = sndev->stdev;

	u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id);
	u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);

	if (speed)
		*speed = (linksta >> 16) & 0xF;

	if (width)
		*width = (linksta >> 20) & 0x3F;
}
440 
441 static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev)
442 {
443 	enum ntb_speed self_speed, peer_speed;
444 	enum ntb_width self_width, peer_width;
445 
446 	if (!sndev->link_is_up) {
447 		sndev->link_speed = NTB_SPEED_NONE;
448 		sndev->link_width = NTB_WIDTH_NONE;
449 		return;
450 	}
451 
452 	switchtec_ntb_part_link_speed(sndev, sndev->self_partition,
453 				      &self_speed, &self_width);
454 	switchtec_ntb_part_link_speed(sndev, sndev->peer_partition,
455 				      &peer_speed, &peer_width);
456 
457 	sndev->link_speed = min(self_speed, peer_speed);
458 	sndev->link_width = min(self_width, peer_width);
459 }
460 
461 static int crosslink_is_enabled(struct switchtec_ntb *sndev)
462 {
463 	struct ntb_info_regs __iomem *inf = sndev->mmio_ntb;
464 
465 	return ioread8(&inf->ntp_info[sndev->peer_partition].xlink_enabled);
466 }
467 
/*
 * Set up doorbell/message routing for the crosslink topology: each
 * byte of msg_map routes one of the peer's inbound message registers
 * back to the matching register in our partition (low 2 bits: message
 * index, remaining bits: source partition). Also unmask the peer's
 * outbound doorbells within our valid range. No-op outside crosslink.
 */
static void crosslink_init_dbmsgs(struct switchtec_ntb *sndev)
{
	int i;
	u32 msg_map = 0;

	if (!crosslink_is_enabled(sndev))
		return;

	for (i = 0; i < ARRAY_SIZE(sndev->mmio_peer_dbmsg->imsg); i++) {
		int m = i | sndev->self_partition << 2;

		msg_map |= m << i * 8;
	}

	iowrite32(msg_map, &sndev->mmio_peer_dbmsg->msg_map);
	iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
		  &sndev->mmio_peer_dbmsg->odb_mask);
}
486 
/* Values exchanged over message register 0 to coordinate link state. */
enum switchtec_msg {
	LINK_MESSAGE = 0,	/* message register index used for link msgs */
	MSG_LINK_UP = 1,	/* sender has enabled its side of the link */
	MSG_LINK_DOWN = 2,	/* sender has disabled its side of the link */
	MSG_CHECK_LINK = 3,	/* ask the receiver to re-check link status */
	MSG_LINK_FORCE_DOWN = 4, /* force link down and reinit the peer */
};
494 
495 static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev);
496 
497 static void link_reinit_work(struct work_struct *work)
498 {
499 	struct switchtec_ntb *sndev;
500 
501 	sndev = container_of(work, struct switchtec_ntb, link_reinit_work);
502 
503 	switchtec_ntb_reinit_peer(sndev);
504 }
505 
/*
 * Re-evaluate the NTB link state in response to a message or event.
 *
 * MSG_LINK_FORCE_DOWN schedules a full peer re-initialisation and
 * immediately reports link-down to clients. For any other trigger
 * the link is up only when our own link_sta flag is set AND the
 * peer's shared memory shows a valid magic with its link_sta set.
 */
static void switchtec_ntb_check_link(struct switchtec_ntb *sndev,
				     enum switchtec_msg msg)
{
	int link_sta;
	int old = sndev->link_is_up;

	if (msg == MSG_LINK_FORCE_DOWN) {
		schedule_work(&sndev->link_reinit_work);

		if (sndev->link_is_up) {
			sndev->link_is_up = 0;
			ntb_link_event(&sndev->ntb);
			dev_info(&sndev->stdev->dev, "ntb link forced down\n");
		}

		return;
	}

	link_sta = sndev->self_shared->link_sta;
	if (link_sta) {
		/* Low 32 bits: peer magic; high 32 bits: peer link_sta. */
		u64 peer = ioread64(&sndev->peer_shared->magic);

		if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC)
			link_sta = peer >> 32;
		else
			link_sta = 0;
	}

	sndev->link_is_up = link_sta;
	switchtec_ntb_set_link_speed(sndev);

	if (link_sta != old) {
		/* Prompt the peer to notice the change as well. */
		switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK);
		ntb_link_event(&sndev->ntb);
		dev_info(&sndev->stdev->dev, "ntb link %s\n",
			 link_sta ? "up" : "down");

		if (link_sta)
			crosslink_init_dbmsgs(sndev);
	}
}
547 
548 static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
549 {
550 	struct switchtec_ntb *sndev = stdev->sndev;
551 
552 	switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);
553 }
554 
555 static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb,
556 				    enum ntb_speed *speed,
557 				    enum ntb_width *width)
558 {
559 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
560 
561 	if (speed)
562 		*speed = sndev->link_speed;
563 	if (width)
564 		*width = sndev->link_width;
565 
566 	return sndev->link_is_up;
567 }
568 
569 static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
570 				     enum ntb_speed max_speed,
571 				     enum ntb_width max_width)
572 {
573 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
574 
575 	dev_dbg(&sndev->stdev->dev, "enabling link\n");
576 
577 	sndev->self_shared->link_sta = 1;
578 	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
579 
580 	switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);
581 
582 	return 0;
583 }
584 
585 static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
586 {
587 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
588 
589 	dev_dbg(&sndev->stdev->dev, "disabling link\n");
590 
591 	sndev->self_shared->link_sta = 0;
592 	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_DOWN);
593 
594 	switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);
595 
596 	return 0;
597 }
598 
599 static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb)
600 {
601 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
602 
603 	return sndev->db_valid_mask;
604 }
605 
/* All doorbells are delivered through a single interrupt vector. */
static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb)
{
	return 1;
}
610 
611 static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
612 {
613 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
614 
615 	if (db_vector < 0 || db_vector > 1)
616 		return 0;
617 
618 	return sndev->db_valid_mask;
619 }
620 
621 static u64 switchtec_ntb_db_read(struct ntb_dev *ntb)
622 {
623 	u64 ret;
624 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
625 
626 	ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift;
627 
628 	return ret & sndev->db_valid_mask;
629 }
630 
631 static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
632 {
633 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
634 
635 	iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb);
636 
637 	return 0;
638 }
639 
640 static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
641 {
642 	unsigned long irqflags;
643 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
644 
645 	if (db_bits & ~sndev->db_valid_mask)
646 		return -EINVAL;
647 
648 	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
649 
650 	sndev->db_mask |= db_bits << sndev->db_shift;
651 	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
652 
653 	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
654 
655 	return 0;
656 }
657 
658 static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
659 {
660 	unsigned long irqflags;
661 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
662 
663 	if (db_bits & ~sndev->db_valid_mask)
664 		return -EINVAL;
665 
666 	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
667 
668 	sndev->db_mask &= ~(db_bits << sndev->db_shift);
669 	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
670 
671 	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
672 
673 	return 0;
674 }
675 
676 static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
677 {
678 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
679 
680 	return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask;
681 }
682 
/*
 * Report the physical address, write size and data value that a
 * client can use to ring doorbell db_bit on the peer with a plain
 * memory write. The address is the offset of the peer's outbound
 * doorbell register within BAR 0 of the switchtec device.
 */
static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
				      phys_addr_t *db_addr,
				      resource_size_t *db_size,
				      u64 *db_data,
				      int db_bit)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	unsigned long offset;

	if (unlikely(db_bit >= BITS_PER_LONG_LONG))
		return -EINVAL;

	/* Offset of the odb register relative to the start of the BAR. */
	offset = (unsigned long)sndev->mmio_peer_dbmsg->odb -
		(unsigned long)sndev->stdev->mmio;

	/* Advance past whole bytes covered by our doorbell shift. */
	offset += sndev->db_shift / 8;

	if (db_addr)
		*db_addr = pci_resource_start(ntb->pdev, 0) + offset;
	if (db_size)
		*db_size = sizeof(u32);
	if (db_data)
		*db_data = BIT_ULL(db_bit) << sndev->db_peer_shift;

	return 0;
}
709 
710 static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
711 {
712 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
713 
714 	iowrite64(db_bits << sndev->db_peer_shift,
715 		  &sndev->mmio_peer_dbmsg->odb);
716 
717 	return 0;
718 }
719 
720 static int switchtec_ntb_spad_count(struct ntb_dev *ntb)
721 {
722 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
723 
724 	return ARRAY_SIZE(sndev->self_shared->spad);
725 }
726 
727 static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx)
728 {
729 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
730 
731 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
732 		return 0;
733 
734 	if (!sndev->self_shared)
735 		return 0;
736 
737 	return sndev->self_shared->spad[idx];
738 }
739 
740 static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
741 {
742 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
743 
744 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
745 		return -EINVAL;
746 
747 	if (!sndev->self_shared)
748 		return -EIO;
749 
750 	sndev->self_shared->spad[idx] = val;
751 
752 	return 0;
753 }
754 
755 static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx,
756 					int sidx)
757 {
758 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
759 
760 	if (pidx != NTB_DEF_PEER_IDX)
761 		return -EINVAL;
762 
763 	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
764 		return 0;
765 
766 	if (!sndev->peer_shared)
767 		return 0;
768 
769 	return ioread32(&sndev->peer_shared->spad[sidx]);
770 }
771 
772 static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
773 					 int sidx, u32 val)
774 {
775 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
776 
777 	if (pidx != NTB_DEF_PEER_IDX)
778 		return -EINVAL;
779 
780 	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
781 		return -EINVAL;
782 
783 	if (!sndev->peer_shared)
784 		return -EIO;
785 
786 	iowrite32(val, &sndev->peer_shared->spad[sidx]);
787 
788 	return 0;
789 }
790 
791 static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx,
792 					int sidx, phys_addr_t *spad_addr)
793 {
794 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
795 	unsigned long offset;
796 
797 	if (pidx != NTB_DEF_PEER_IDX)
798 		return -EINVAL;
799 
800 	offset = (unsigned long)&sndev->peer_shared->spad[sidx] -
801 		(unsigned long)sndev->stdev->mmio;
802 
803 	if (spad_addr)
804 		*spad_addr = pci_resource_start(ntb->pdev, 0) + offset;
805 
806 	return 0;
807 }
808 
/* Hardware callbacks registered with the common NTB core. */
static const struct ntb_dev_ops switchtec_ntb_ops = {
	.mw_count		= switchtec_ntb_mw_count,
	.mw_get_align		= switchtec_ntb_mw_get_align,
	.mw_set_trans		= switchtec_ntb_mw_set_trans,
	.peer_mw_count		= switchtec_ntb_peer_mw_count,
	.peer_mw_get_addr	= switchtec_ntb_peer_mw_get_addr,
	.link_is_up		= switchtec_ntb_link_is_up,
	.link_enable		= switchtec_ntb_link_enable,
	.link_disable		= switchtec_ntb_link_disable,
	.db_valid_mask		= switchtec_ntb_db_valid_mask,
	.db_vector_count	= switchtec_ntb_db_vector_count,
	.db_vector_mask		= switchtec_ntb_db_vector_mask,
	.db_read		= switchtec_ntb_db_read,
	.db_clear		= switchtec_ntb_db_clear,
	.db_set_mask		= switchtec_ntb_db_set_mask,
	.db_clear_mask		= switchtec_ntb_db_clear_mask,
	.db_read_mask		= switchtec_ntb_db_read_mask,
	.peer_db_addr		= switchtec_ntb_peer_db_addr,
	.peer_db_set		= switchtec_ntb_peer_db_set,
	.spad_count		= switchtec_ntb_spad_count,
	.spad_read		= switchtec_ntb_spad_read,
	.spad_write		= switchtec_ntb_spad_write,
	.peer_spad_read		= switchtec_ntb_peer_spad_read,
	.peer_spad_write	= switchtec_ntb_peer_spad_write,
	.peer_spad_addr		= switchtec_ntb_peer_spad_addr,
};
835 
836 static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
837 {
838 	u64 tpart_vec;
839 	int self;
840 	u64 part_map;
841 	int bit;
842 
843 	sndev->ntb.pdev = sndev->stdev->pdev;
844 	sndev->ntb.topo = NTB_TOPO_SWITCH;
845 	sndev->ntb.ops = &switchtec_ntb_ops;
846 
847 	INIT_WORK(&sndev->link_reinit_work, link_reinit_work);
848 
849 	sndev->self_partition = sndev->stdev->partition;
850 
851 	sndev->mmio_ntb = sndev->stdev->mmio_ntb;
852 
853 	self = sndev->self_partition;
854 	tpart_vec = ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_high);
855 	tpart_vec <<= 32;
856 	tpart_vec |= ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_low);
857 
858 	part_map = ioread64(&sndev->mmio_ntb->ep_map);
859 	part_map &= ~(1 << sndev->self_partition);
860 
861 	if (!ffs(tpart_vec)) {
862 		if (sndev->stdev->partition_count != 2) {
863 			dev_err(&sndev->stdev->dev,
864 				"ntb target partition not defined\n");
865 			return -ENODEV;
866 		}
867 
868 		bit = ffs(part_map);
869 		if (!bit) {
870 			dev_err(&sndev->stdev->dev,
871 				"peer partition is not NT partition\n");
872 			return -ENODEV;
873 		}
874 
875 		sndev->peer_partition = bit - 1;
876 	} else {
877 		if (ffs(tpart_vec) != fls(tpart_vec)) {
878 			dev_err(&sndev->stdev->dev,
879 				"ntb driver only supports 1 pair of 1-1 ntb mapping\n");
880 			return -ENODEV;
881 		}
882 
883 		sndev->peer_partition = ffs(tpart_vec) - 1;
884 		if (!(part_map & (1 << sndev->peer_partition))) {
885 			dev_err(&sndev->stdev->dev,
886 				"ntb target partition is not NT partition\n");
887 			return -ENODEV;
888 		}
889 	}
890 
891 	dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d\n",
892 		sndev->self_partition, sndev->stdev->partition_count);
893 
894 	sndev->mmio_ctrl = (void * __iomem)sndev->mmio_ntb +
895 		SWITCHTEC_NTB_REG_CTRL_OFFSET;
896 	sndev->mmio_dbmsg = (void * __iomem)sndev->mmio_ntb +
897 		SWITCHTEC_NTB_REG_DBMSG_OFFSET;
898 
899 	sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition];
900 	sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition];
901 	sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition];
902 	sndev->mmio_peer_dbmsg = sndev->mmio_self_dbmsg;
903 
904 	return 0;
905 }
906 
/*
 * Program reserved LUT entry lut_idx in the given control register
 * block to translate to 'addr' in 'partition', and (re)enable LUT
 * windows on the BAR shared with the LUTs, setting the per-entry size
 * and entry count fields.
 *
 * Returns 0 on success or a negative error from the partition
 * lock/configure sequence.
 */
static int config_rsvd_lut_win(struct switchtec_ntb *sndev,
			       struct ntb_ctrl_regs __iomem *ctl,
			       int lut_idx, int partition, u64 addr)
{
	int peer_bar = sndev->peer_direct_mw_to_bar[0];
	u32 ctl_val;
	int rc;

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	/* Keep low control bits; set LUT enable, entry size and count. */
	ctl_val = ioread32(&ctl->bar_entry[peer_bar].ctl);
	ctl_val &= 0xFF;
	ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN;
	ctl_val |= ilog2(LUT_SIZE) << 8;
	ctl_val |= (sndev->nr_lut_mw - 1) << 14;
	iowrite32(ctl_val, &ctl->bar_entry[peer_bar].ctl);

	/* Enable the LUT entry, tagged with the target partition. */
	iowrite64((NTB_CTRL_LUT_EN | (partition << 1) | addr),
		  &ctl->lut_entry[lut_idx]);

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);
	if (rc) {
		u32 bar_error, lut_error;

		bar_error = ioread32(&ctl->bar_error);
		lut_error = ioread32(&ctl->lut_error);
		dev_err(&sndev->stdev->dev,
			"Error setting up reserved lut window: %08x / %08x\n",
			bar_error, lut_error);
		return rc;
	}

	return 0;
}
945 
946 static int config_req_id_table(struct switchtec_ntb *sndev,
947 			       struct ntb_ctrl_regs __iomem *mmio_ctrl,
948 			       int *req_ids, int count)
949 {
950 	int i, rc = 0;
951 	u32 error;
952 	u32 proxy_id;
953 
954 	if (ioread32(&mmio_ctrl->req_id_table_size) < count) {
955 		dev_err(&sndev->stdev->dev,
956 			"Not enough requester IDs available.\n");
957 		return -EFAULT;
958 	}
959 
960 	rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
961 				   NTB_CTRL_PART_OP_LOCK,
962 				   NTB_CTRL_PART_STATUS_LOCKED);
963 	if (rc)
964 		return rc;
965 
966 	iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
967 		  &mmio_ctrl->partition_ctrl);
968 
969 	for (i = 0; i < count; i++) {
970 		iowrite32(req_ids[i] << 16 | NTB_CTRL_REQ_ID_EN,
971 			  &mmio_ctrl->req_id_table[i]);
972 
973 		proxy_id = ioread32(&mmio_ctrl->req_id_table[i]);
974 		dev_dbg(&sndev->stdev->dev,
975 			"Requester ID %02X:%02X.%X -> BB:%02X.%X\n",
976 			req_ids[i] >> 8, (req_ids[i] >> 3) & 0x1F,
977 			req_ids[i] & 0x7, (proxy_id >> 4) & 0x1F,
978 			(proxy_id >> 1) & 0x7);
979 	}
980 
981 	rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
982 				   NTB_CTRL_PART_OP_CFG,
983 				   NTB_CTRL_PART_STATUS_NORMAL);
984 
985 	if (rc == -EIO) {
986 		error = ioread32(&mmio_ctrl->req_id_error);
987 		dev_err(&sndev->stdev->dev,
988 			"Error setting up the requester ID table: %08x\n",
989 			error);
990 	}
991 
992 	return 0;
993 }
994 
/*
 * For the crosslink topology, program our own LUT entries and direct
 * BAR windows to translate into the virtual crosslink partition's
 * BAR addresses (enumerated by crosslink_enum_partition). Entry
 * ntb_lut_idx is skipped — it is the reserved LUT already pointing at
 * the peer's dbmsg registers.
 *
 * Returns 0 on success or a negative error from the lock/configure
 * sequence.
 */
static int crosslink_setup_mws(struct switchtec_ntb *sndev, int ntb_lut_idx,
			       u64 *mw_addrs, int mw_count)
{
	int rc, i;
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_self_ctrl;
	u64 addr;
	size_t size, offset;
	int bar;
	int xlate_pos;
	u32 ctl_val;

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	/* LUT windows all translate into slices of the first BAR. */
	for (i = 0; i < sndev->nr_lut_mw; i++) {
		if (i == ntb_lut_idx)
			continue;

		addr = mw_addrs[0] + LUT_SIZE * i;

		iowrite64((NTB_CTRL_LUT_EN | (sndev->peer_partition << 1) |
			   addr),
			  &ctl->lut_entry[i]);
	}

	/* We can only use as many direct windows as BARs enumerated. */
	sndev->nr_direct_mw = min_t(int, sndev->nr_direct_mw, mw_count);

	for (i = 0; i < sndev->nr_direct_mw; i++) {
		bar = sndev->direct_mw_to_bar[i];
		/* Window 0 shares its BAR with the LUT area. */
		offset = (i == 0) ? LUT_SIZE * sndev->nr_lut_mw : 0;
		addr = mw_addrs[i] + offset;
		size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
		xlate_pos = ilog2(size);

		if (offset && size > offset)
			size = offset;

		ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
		ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;

		iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
		iowrite32(xlate_pos | (lower_32_bits(size) & 0xFFFFF000),
			  &ctl->bar_entry[bar].win_size);
		iowrite32(upper_32_bits(size), &ctl->bar_ext_entry[bar].win_size);
		iowrite64(sndev->peer_partition | addr,
			  &ctl->bar_entry[bar].xlate_addr);
	}

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);
	if (rc) {
		u32 bar_error, lut_error;

		bar_error = ioread32(&ctl->bar_error);
		lut_error = ioread32(&ctl->lut_error);
		dev_err(&sndev->stdev->dev,
			"Error setting up cross link windows: %08x / %08x\n",
			bar_error, lut_error);
		return rc;
	}

	return 0;
}
1060 
1061 static int crosslink_setup_req_ids(struct switchtec_ntb *sndev,
1062 	struct ntb_ctrl_regs __iomem *mmio_ctrl)
1063 {
1064 	int req_ids[16];
1065 	int i;
1066 	u32 proxy_id;
1067 
1068 	for (i = 0; i < ARRAY_SIZE(req_ids); i++) {
1069 		proxy_id = ioread32(&sndev->mmio_self_ctrl->req_id_table[i]);
1070 
1071 		if (!(proxy_id & NTB_CTRL_REQ_ID_EN))
1072 			break;
1073 
1074 		req_ids[i] = ((proxy_id >> 1) & 0xFF);
1075 	}
1076 
1077 	return config_req_id_table(sndev, mmio_ctrl, req_ids, i);
1078 }
1079 
1080 /*
1081  * In crosslink configuration there is a virtual partition in the
1082  * middle of the two switches. The BARs in this partition have to be
1083  * enumerated and assigned addresses.
1084  */
/*
 * Enumerate the virtual crosslink partition's 64-bit BARs and assign
 * each a fixed 64G-spaced address. BARs that do not accept their
 * assigned address are skipped. Returns the number of usable BARs
 * recorded in bar_addrs.
 */
static int crosslink_enum_partition(struct switchtec_ntb *sndev,
				    u64 *bar_addrs)
{
	struct part_cfg_regs __iomem *part_cfg =
		&sndev->stdev->mmio_part_cfg_all[sndev->peer_partition];
	u32 pff = ioread32(&part_cfg->vep_pff_inst_id);
	struct pff_csr_regs __iomem *mmio_pff =
		&sndev->stdev->mmio_pff_csr[pff];
	const u64 bar_space = 0x1000000000LL;
	u64 bar_addr;
	int bar_cnt = 0;
	int i;

	/* Enable memory space and bus mastering on the virtual EP. */
	iowrite16(0x6, &mmio_pff->pcicmd);

	for (i = 0; i < ARRAY_SIZE(mmio_pff->pci_bar64); i++) {
		iowrite64(bar_space * i, &mmio_pff->pci_bar64[i]);
		bar_addr = ioread64(&mmio_pff->pci_bar64[i]);
		/* Mask off the BAR type/prefetch bits. */
		bar_addr &= ~0xf;

		dev_dbg(&sndev->stdev->dev,
			"Crosslink BAR%d addr: %llx\n",
			i*2, bar_addr);

		/* Skip BARs that did not latch the assigned address. */
		if (bar_addr != bar_space * i)
			continue;

		bar_addrs[bar_cnt++] = bar_addr;
	}

	return bar_cnt;
}
1117 
1118 static int switchtec_ntb_init_crosslink(struct switchtec_ntb *sndev)
1119 {
1120 	int rc;
1121 	int bar = sndev->direct_mw_to_bar[0];
1122 	const int ntb_lut_idx = 1;
1123 	u64 bar_addrs[6];
1124 	u64 addr;
1125 	int offset;
1126 	int bar_cnt;
1127 
1128 	if (!crosslink_is_enabled(sndev))
1129 		return 0;
1130 
1131 	dev_info(&sndev->stdev->dev, "Using crosslink configuration\n");
1132 	sndev->ntb.topo = NTB_TOPO_CROSSLINK;
1133 
1134 	bar_cnt = crosslink_enum_partition(sndev, bar_addrs);
1135 	if (bar_cnt < sndev->nr_direct_mw + 1) {
1136 		dev_err(&sndev->stdev->dev,
1137 			"Error enumerating crosslink partition\n");
1138 		return -EINVAL;
1139 	}
1140 
1141 	addr = (bar_addrs[0] + SWITCHTEC_GAS_NTB_OFFSET +
1142 		SWITCHTEC_NTB_REG_DBMSG_OFFSET +
1143 		sizeof(struct ntb_dbmsg_regs) * sndev->peer_partition);
1144 
1145 	offset = addr & (LUT_SIZE - 1);
1146 	addr -= offset;
1147 
1148 	rc = config_rsvd_lut_win(sndev, sndev->mmio_self_ctrl, ntb_lut_idx,
1149 				 sndev->peer_partition, addr);
1150 	if (rc)
1151 		return rc;
1152 
1153 	rc = crosslink_setup_mws(sndev, ntb_lut_idx, &bar_addrs[1],
1154 				 bar_cnt - 1);
1155 	if (rc)
1156 		return rc;
1157 
1158 	rc = crosslink_setup_req_ids(sndev, sndev->mmio_peer_ctrl);
1159 	if (rc)
1160 		return rc;
1161 
1162 	sndev->mmio_xlink_win = pci_iomap_range(sndev->stdev->pdev, bar,
1163 						LUT_SIZE, LUT_SIZE);
1164 	if (!sndev->mmio_xlink_win) {
1165 		rc = -ENOMEM;
1166 		return rc;
1167 	}
1168 
1169 	sndev->mmio_peer_dbmsg = sndev->mmio_xlink_win + offset;
1170 	sndev->nr_rsvd_luts++;
1171 
1172 	crosslink_init_dbmsgs(sndev);
1173 
1174 	return 0;
1175 }
1176 
1177 static void switchtec_ntb_deinit_crosslink(struct switchtec_ntb *sndev)
1178 {
1179 	if (sndev->mmio_xlink_win)
1180 		pci_iounmap(sndev->stdev->pdev, sndev->mmio_xlink_win);
1181 }
1182 
1183 static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
1184 {
1185 	int i;
1186 	int cnt = 0;
1187 
1188 	for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) {
1189 		u32 r = ioread32(&ctrl->bar_entry[i].ctl);
1190 
1191 		if (r & NTB_CTRL_BAR_VALID)
1192 			map[cnt++] = i;
1193 	}
1194 
1195 	return cnt;
1196 }
1197 
1198 static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
1199 {
1200 	sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar,
1201 				       sndev->mmio_self_ctrl);
1202 
1203 	sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
1204 	sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);
1205 
1206 	dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut\n",
1207 		sndev->nr_direct_mw, sndev->nr_lut_mw);
1208 
1209 	sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
1210 					    sndev->mmio_peer_ctrl);
1211 
1212 	sndev->peer_nr_lut_mw =
1213 		ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
1214 	sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);
1215 
1216 	dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut\n",
1217 		sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);
1218 
1219 }
1220 
1221 /*
1222  * There are 64 doorbells in the switch hardware but this is
1223  * shared among all partitions. So we must split them in half
1224  * (32 for each partition). However, the message interrupts are
1225  * also shared with the top 4 doorbells so we just limit this to
1226  * 28 doorbells per partition.
1227  *
 * In crosslink mode, each side has its own dbmsg register so
1229  * they can each use all 60 of the available doorbells.
1230  */
1231 static void switchtec_ntb_init_db(struct switchtec_ntb *sndev)
1232 {
1233 	sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;
1234 
1235 	if (sndev->mmio_peer_dbmsg != sndev->mmio_self_dbmsg) {
1236 		sndev->db_shift = 0;
1237 		sndev->db_peer_shift = 0;
1238 		sndev->db_valid_mask = sndev->db_mask;
1239 	} else if (sndev->self_partition < sndev->peer_partition) {
1240 		sndev->db_shift = 0;
1241 		sndev->db_peer_shift = 32;
1242 		sndev->db_valid_mask = 0x0FFFFFFF;
1243 	} else {
1244 		sndev->db_shift = 32;
1245 		sndev->db_peer_shift = 0;
1246 		sndev->db_valid_mask = 0x0FFFFFFF;
1247 	}
1248 
1249 	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
1250 	iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
1251 		  &sndev->mmio_peer_dbmsg->odb_mask);
1252 
1253 	dev_dbg(&sndev->stdev->dev, "dbs: shift %d/%d, mask %016llx\n",
1254 		sndev->db_shift, sndev->db_peer_shift, sndev->db_valid_mask);
1255 }
1256 
1257 static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
1258 {
1259 	int i;
1260 	u32 msg_map = 0;
1261 
1262 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
1263 		int m = i | sndev->peer_partition << 2;
1264 
1265 		msg_map |= m << i * 8;
1266 	}
1267 
1268 	iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map);
1269 
1270 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++)
1271 		iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK,
1272 			  &sndev->mmio_self_dbmsg->imsg[i]);
1273 }
1274 
1275 static int
1276 switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
1277 {
1278 	int req_ids[2];
1279 
1280 	/*
1281 	 * Root Complex Requester ID (which is 0:00.0)
1282 	 */
1283 	req_ids[0] = 0;
1284 
1285 	/*
1286 	 * Host Bridge Requester ID (as read from the mmap address)
1287 	 */
1288 	req_ids[1] = ioread16(&sndev->mmio_ntb->requester_id);
1289 
1290 	return config_req_id_table(sndev, sndev->mmio_self_ctrl, req_ids,
1291 				   ARRAY_SIZE(req_ids));
1292 }
1293 
1294 static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
1295 {
1296 	int i;
1297 
1298 	memset(sndev->self_shared, 0, LUT_SIZE);
1299 	sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC;
1300 	sndev->self_shared->partition_id = sndev->stdev->partition;
1301 
1302 	for (i = 0; i < sndev->nr_direct_mw; i++) {
1303 		int bar = sndev->direct_mw_to_bar[i];
1304 		resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar);
1305 
1306 		if (i == 0)
1307 			sz = min_t(resource_size_t, sz,
1308 				   LUT_SIZE * sndev->nr_lut_mw);
1309 
1310 		sndev->self_shared->mw_sizes[i] = sz;
1311 	}
1312 
1313 	for (i = 0; i < sndev->nr_lut_mw; i++) {
1314 		int idx = sndev->nr_direct_mw + i;
1315 
1316 		sndev->self_shared->mw_sizes[idx] = LUT_SIZE;
1317 	}
1318 }
1319 
1320 static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
1321 {
1322 	int self_bar = sndev->direct_mw_to_bar[0];
1323 	int rc;
1324 
1325 	sndev->nr_rsvd_luts++;
1326 	sndev->self_shared = dma_alloc_coherent(&sndev->stdev->pdev->dev,
1327 						LUT_SIZE,
1328 						&sndev->self_shared_dma,
1329 						GFP_KERNEL);
1330 	if (!sndev->self_shared) {
1331 		dev_err(&sndev->stdev->dev,
1332 			"unable to allocate memory for shared mw\n");
1333 		return -ENOMEM;
1334 	}
1335 
1336 	switchtec_ntb_init_shared(sndev);
1337 
1338 	rc = config_rsvd_lut_win(sndev, sndev->mmio_peer_ctrl, 0,
1339 				 sndev->self_partition,
1340 				 sndev->self_shared_dma);
1341 	if (rc)
1342 		goto unalloc_and_exit;
1343 
1344 	sndev->peer_shared = pci_iomap(sndev->stdev->pdev, self_bar, LUT_SIZE);
1345 	if (!sndev->peer_shared) {
1346 		rc = -ENOMEM;
1347 		goto unalloc_and_exit;
1348 	}
1349 
1350 	dev_dbg(&sndev->stdev->dev, "Shared MW Ready\n");
1351 	return 0;
1352 
1353 unalloc_and_exit:
1354 	dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
1355 			  sndev->self_shared, sndev->self_shared_dma);
1356 
1357 	return rc;
1358 }
1359 
1360 static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev)
1361 {
1362 	if (sndev->peer_shared)
1363 		pci_iounmap(sndev->stdev->pdev, sndev->peer_shared);
1364 
1365 	if (sndev->self_shared)
1366 		dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
1367 				  sndev->self_shared,
1368 				  sndev->self_shared_dma);
1369 	sndev->nr_rsvd_luts--;
1370 }
1371 
/* Doorbell interrupt handler: forward the event to the NTB core */
static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev)
{
	struct switchtec_ntb *sndev = dev;

	dev_dbg(&sndev->stdev->dev, "doorbell\n");

	/* All doorbells are reported on vector 0 */
	ntb_db_event(&sndev->ntb, 0);

	return IRQ_HANDLED;
}
1382 
1383 static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev)
1384 {
1385 	int i;
1386 	struct switchtec_ntb *sndev = dev;
1387 
1388 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
1389 		u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]);
1390 
1391 		if (msg & NTB_DBMSG_IMSG_STATUS) {
1392 			dev_dbg(&sndev->stdev->dev, "message: %d %08x\n",
1393 				i, (u32)msg);
1394 			iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status);
1395 
1396 			if (i == LINK_MESSAGE)
1397 				switchtec_ntb_check_link(sndev, msg);
1398 		}
1399 	}
1400 
1401 	return IRQ_HANDLED;
1402 }
1403 
1404 static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev)
1405 {
1406 	int i;
1407 	int rc;
1408 	int doorbell_irq = 0;
1409 	int message_irq = 0;
1410 	int event_irq;
1411 	int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map);
1412 
1413 	event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number);
1414 
1415 	while (doorbell_irq == event_irq)
1416 		doorbell_irq++;
1417 	while (message_irq == doorbell_irq ||
1418 	       message_irq == event_irq)
1419 		message_irq++;
1420 
1421 	dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d\n",
1422 		event_irq, doorbell_irq, message_irq);
1423 
1424 	for (i = 0; i < idb_vecs - 4; i++)
1425 		iowrite8(doorbell_irq,
1426 			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);
1427 
1428 	for (; i < idb_vecs; i++)
1429 		iowrite8(message_irq,
1430 			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);
1431 
1432 	sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq);
1433 	sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq);
1434 
1435 	rc = request_irq(sndev->doorbell_irq,
1436 			 switchtec_ntb_doorbell_isr, 0,
1437 			 "switchtec_ntb_doorbell", sndev);
1438 	if (rc)
1439 		return rc;
1440 
1441 	rc = request_irq(sndev->message_irq,
1442 			 switchtec_ntb_message_isr, 0,
1443 			 "switchtec_ntb_message", sndev);
1444 	if (rc) {
1445 		free_irq(sndev->doorbell_irq, sndev);
1446 		return rc;
1447 	}
1448 
1449 	return 0;
1450 }
1451 
/* Release both interrupt handlers taken in init_db_msg_irq() */
static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev)
{
	free_irq(sndev->doorbell_irq, sndev);
	free_irq(sndev->message_irq, sndev);
}
1457 
/*
 * Called when the peer partition has come back up: rebuild the memory
 * window description and the shared memory window so the peer's new
 * configuration is picked up.
 */
static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev)
{
	dev_info(&sndev->stdev->dev, "peer reinitialized\n");
	switchtec_ntb_deinit_shared_mw(sndev);
	switchtec_ntb_init_mw(sndev);
	return switchtec_ntb_init_shared_mw(sndev);
}
1465 
/*
 * Class-interface add callback: called for each switchtec device.
 * Allocates and fully initializes the NTB device (memory windows,
 * requester IDs, crosslink, doorbells, messages, shared window, irqs)
 * and registers it with the NTB core. On any failure, everything set
 * up so far is unwound in reverse order and an errno is returned.
 */
static int switchtec_ntb_add(struct device *dev,
			     struct class_interface *class_intf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	struct switchtec_ntb *sndev;
	int rc;

	stdev->sndev = NULL;

	/* Only attach to ports exposing the NTB (bridge-other) class code */
	if (stdev->pdev->class != (PCI_CLASS_BRIDGE_OTHER << 8))
		return -ENODEV;

	sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
	if (!sndev)
		return -ENOMEM;

	sndev->stdev = stdev;
	rc = switchtec_ntb_init_sndev(sndev);
	if (rc)
		goto free_and_exit;

	switchtec_ntb_init_mw(sndev);

	rc = switchtec_ntb_init_req_id_table(sndev);
	if (rc)
		goto free_and_exit;

	rc = switchtec_ntb_init_crosslink(sndev);
	if (rc)
		goto free_and_exit;

	switchtec_ntb_init_db(sndev);
	switchtec_ntb_init_msgs(sndev);

	rc = switchtec_ntb_init_shared_mw(sndev);
	if (rc)
		goto deinit_crosslink;

	rc = switchtec_ntb_init_db_msg_irq(sndev);
	if (rc)
		goto deinit_shared_and_exit;

	/*
	 * If this host crashed, the other host may think the link is
	 * still up. Tell them to force it down (it will go back up
	 * once we register the ntb device).
	 */
	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_FORCE_DOWN);

	rc = ntb_register_device(&sndev->ntb);
	if (rc)
		goto deinit_and_exit;

	stdev->sndev = sndev;
	stdev->link_notifier = switchtec_ntb_link_notification;
	dev_info(dev, "NTB device registered\n");

	return 0;

deinit_and_exit:
	switchtec_ntb_deinit_db_msg_irq(sndev);
deinit_shared_and_exit:
	switchtec_ntb_deinit_shared_mw(sndev);
deinit_crosslink:
	switchtec_ntb_deinit_crosslink(sndev);
free_and_exit:
	kfree(sndev);
	dev_err(dev, "failed to register ntb device: %d\n", rc);
	return rc;
}
1536 
/*
 * Class-interface remove callback: unregister the NTB device and tear
 * down all resources in the reverse order of switchtec_ntb_add().
 */
static void switchtec_ntb_remove(struct device *dev,
				 struct class_interface *class_intf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	struct switchtec_ntb *sndev = stdev->sndev;

	/* Nothing to do if add() never attached an NTB device */
	if (!sndev)
		return;

	/* Detach from the parent device before freeing anything */
	stdev->link_notifier = NULL;
	stdev->sndev = NULL;
	ntb_unregister_device(&sndev->ntb);
	switchtec_ntb_deinit_db_msg_irq(sndev);
	switchtec_ntb_deinit_shared_mw(sndev);
	switchtec_ntb_deinit_crosslink(sndev);
	kfree(sndev);
	dev_info(dev, "ntb device unregistered\n");
}
1555 
/* Hooks invoked for every device in the switchtec class */
static struct class_interface switchtec_interface  = {
	.add_dev = switchtec_ntb_add,
	.remove_dev = switchtec_ntb_remove,
};
1560 
/* Module init: attach to the switchtec class driver */
static int __init switchtec_ntb_init(void)
{
	switchtec_interface.class = switchtec_class;
	return class_interface_register(&switchtec_interface);
}
module_init(switchtec_ntb_init);
1567 
/* Module exit: detach from the switchtec class driver */
static void __exit switchtec_ntb_exit(void)
{
	class_interface_unregister(&switchtec_interface);
}
module_exit(switchtec_ntb_exit);
1573