1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2015-2018 Netronome Systems, Inc. */
3 
4 /*
5  * nfp6000_pcie.c
6  * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
7  *          Jason McMullan <jason.mcmullan@netronome.com>
8  *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
9  *
10  * Multiplexes the NFP BARs between NFP internal resources and
11  * implements the PCIe specific interface for generic CPP bus access.
12  *
13  * The BARs are managed with refcounts and are allocated/acquired
14  * using target, token and offset/size matching.  The generic CPP bus
15  * abstraction builds upon this BAR interface.
16  */
17 
18 #include <asm/unaligned.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/kref.h>
22 #include <linux/io.h>
23 #include <linux/delay.h>
24 #include <linux/interrupt.h>
25 #include <linux/sort.h>
26 #include <linux/sched.h>
27 #include <linux/types.h>
28 #include <linux/pci.h>
29 
30 #include "nfp_cpp.h"
31 
32 #include "nfp6000/nfp6000.h"
33 
34 #include "nfp6000_pcie.h"
35 
36 #define NFP_PCIE_BAR(_pf)	(0x30000 + ((_pf) & 7) * 0xc0)
37 #define NFP_PCIE_BAR_EXPLICIT_BAR0(_x, _y) \
38 	(0x00000080 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3)))
39 #define   NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType(_x)     (((_x) & 0x3) << 30)
40 #define   NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType_of(_x)  (((_x) >> 30) & 0x3)
41 #define   NFP_PCIE_BAR_EXPLICIT_BAR0_Token(_x)          (((_x) & 0x3) << 28)
42 #define   NFP_PCIE_BAR_EXPLICIT_BAR0_Token_of(_x)       (((_x) >> 28) & 0x3)
43 #define   NFP_PCIE_BAR_EXPLICIT_BAR0_Address(_x)        (((_x) & 0xffffff) << 0)
44 #define   NFP_PCIE_BAR_EXPLICIT_BAR0_Address_of(_x)     (((_x) >> 0) & 0xffffff)
45 #define NFP_PCIE_BAR_EXPLICIT_BAR1(_x, _y) \
46 	(0x00000084 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3)))
47 #define   NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef(_x)      (((_x) & 0x7f) << 24)
48 #define   NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef_of(_x)   (((_x) >> 24) & 0x7f)
49 #define   NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster(_x)     (((_x) & 0x3ff) << 14)
50 #define   NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster_of(_x)  (((_x) >> 14) & 0x3ff)
51 #define   NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef(_x)        (((_x) & 0x3fff) << 0)
52 #define   NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef_of(_x)     (((_x) >> 0) & 0x3fff)
53 #define NFP_PCIE_BAR_EXPLICIT_BAR2(_x, _y) \
54 	(0x00000088 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3)))
55 #define   NFP_PCIE_BAR_EXPLICIT_BAR2_Target(_x)         (((_x) & 0xf) << 28)
56 #define   NFP_PCIE_BAR_EXPLICIT_BAR2_Target_of(_x)      (((_x) >> 28) & 0xf)
57 #define   NFP_PCIE_BAR_EXPLICIT_BAR2_Action(_x)         (((_x) & 0x1f) << 23)
58 #define   NFP_PCIE_BAR_EXPLICIT_BAR2_Action_of(_x)      (((_x) >> 23) & 0x1f)
59 #define   NFP_PCIE_BAR_EXPLICIT_BAR2_Length(_x)         (((_x) & 0x1f) << 18)
60 #define   NFP_PCIE_BAR_EXPLICIT_BAR2_Length_of(_x)      (((_x) >> 18) & 0x1f)
61 #define   NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask(_x)       (((_x) & 0xff) << 10)
62 #define   NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask_of(_x)    (((_x) >> 10) & 0xff)
63 #define   NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster(_x)   (((_x) & 0x3ff) << 0)
64 #define   NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster_of(_x) (((_x) >> 0) & 0x3ff)
65 
66 #define   NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress(_x)  (((_x) & 0x1f) << 16)
67 #define   NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress_of(_x) (((_x) >> 16) & 0x1f)
68 #define   NFP_PCIE_BAR_PCIE2CPP_BaseAddress(_x)         (((_x) & 0xffff) << 0)
69 #define   NFP_PCIE_BAR_PCIE2CPP_BaseAddress_of(_x)      (((_x) >> 0) & 0xffff)
70 #define   NFP_PCIE_BAR_PCIE2CPP_LengthSelect(_x)        (((_x) & 0x3) << 27)
71 #define   NFP_PCIE_BAR_PCIE2CPP_LengthSelect_of(_x)     (((_x) >> 27) & 0x3)
72 #define     NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT    0
73 #define     NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT    1
74 #define     NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE    3
75 #define   NFP_PCIE_BAR_PCIE2CPP_MapType(_x)             (((_x) & 0x7) << 29)
76 #define   NFP_PCIE_BAR_PCIE2CPP_MapType_of(_x)          (((_x) >> 29) & 0x7)
77 #define     NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED         0
78 #define     NFP_PCIE_BAR_PCIE2CPP_MapType_BULK          1
79 #define     NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET        2
80 #define     NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL       3
81 #define     NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT0     4
82 #define     NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT1     5
83 #define     NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT2     6
84 #define     NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3     7
85 #define   NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(_x)  (((_x) & 0xf) << 23)
86 #define   NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress_of(_x) (((_x) >> 23) & 0xf)
87 #define   NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(_x)   (((_x) & 0x3) << 21)
88 #define   NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress_of(_x) (((_x) >> 21) & 0x3)
89 #define NFP_PCIE_EM                                     0x020000
90 #define NFP_PCIE_SRAM                                   0x000000
91 
92 /* Minimal size of the PCIe cfg memory we depend on being mapped,
93  * queue controller and DMA controller don't have to be covered.
94  */
95 #define NFP_PCI_MIN_MAP_SIZE				0x080000
96 
97 #define NFP_PCIE_P2C_FIXED_SIZE(bar)               (1 << (bar)->bitsize)
98 #define NFP_PCIE_P2C_BULK_SIZE(bar)                (1 << (bar)->bitsize)
99 #define NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(bar, x) ((x) << ((bar)->bitsize - 2))
100 #define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4))
101 #define NFP_PCIE_P2C_GENERAL_SIZE(bar)             (1 << ((bar)->bitsize - 4))
102 
103 #define NFP_PCIE_CFG_BAR_PCIETOCPPEXPANSIONBAR(bar, slot) \
104 	(0x400 + ((bar) * 8 + (slot)) * 4)
105 
106 #define NFP_PCIE_CPP_BAR_PCIETOCPPEXPANSIONBAR(bar, slot) \
107 	(((bar) * 8 + (slot)) * 4)
108 
109 /* The number of explicit BARs to reserve.
110  * Minimum is 0, maximum is 4 on the NFP6000.
111  * The NFP3800 can have only one per PF.
112  */
113 #define NFP_PCIE_EXPLICIT_BARS		2
114 
115 struct nfp6000_pcie;
116 struct nfp6000_area_priv;
117 
/**
 * struct nfp_bar - describes BAR configuration and usage
 * @nfp:	backlink to owner
 * @barcfg:	cached contents of BAR config CSR
 * @base:	the BAR's base CPP offset
 * @mask:       mask for the BAR aperture (read only)
 * @bitsize:	bitsize of BAR aperture (read only)
 * @index:	index of the BAR
 * @refcnt:	number of current users
 * @iomem:	mapped IO memory
 * @resource:	iomem resource window
 *
 * Each PCI BAR is split into 8 equally-sized slots; one nfp_bar
 * describes a single slot.  @barcfg caches the value last written by
 * nfp6000_bar_write() so matching_bar() can work without CSR reads.
 */
struct nfp_bar {
	struct nfp6000_pcie *nfp;
	u32 barcfg;
	u64 base;          /* CPP address base */
	u64 mask;          /* Bit mask of the bar */
	u32 bitsize;       /* Bit size of the bar */
	int index;
	atomic_t refcnt;   /* 0 == unused; see nfp_bar_get()/nfp_bar_put() */

	void __iomem *iomem;
	struct resource *resource;
};
142 
143 #define NFP_PCI_BAR_MAX    (PCI_64BIT_BAR_COUNT * 8)
144 
/* Per-device state of the NFP6000-family PCIe CPP transport. */
struct nfp6000_pcie {
	struct pci_dev *pdev;
	struct device *dev;

	/* PCI BAR management */
	spinlock_t bar_lock;		/* Protect the PCI2CPP BAR cache */
	int bars;			/* number of populated entries in bar[] */
	struct nfp_bar bar[NFP_PCI_BAR_MAX];
	wait_queue_head_t bar_waiters;	/* woken when a BAR refcount hits 0 */

	/* Reserved BAR access */
	struct {
		void __iomem *csr;	/* expansion BAR config CSRs (via BAR0.0) */
		void __iomem *em;	/* window at NFP_PCIE_EM */
		void __iomem *expl[4];	/* explicit windows (BAR0.4..BAR0.7) */
	} iomem;

	/* Explicit IO access */
	struct {
		struct mutex mutex; /* Lock access to this explicit group */
		u8 master_id;	/* CPP master ID (derived from interface unit) */
		u8 signal_ref;
		void __iomem *data;	/* explicit data area in PCIe SRAM */
		struct {
			void __iomem *addr;	/* mapped group BAR base */
			int bitsize;		/* 0 when group is unavailable */
			int free[4];		/* per-area free flags */
		} group[4];
	} expl;
};
175 
176 static u32 nfp_bar_maptype(struct nfp_bar *bar)
177 {
178 	return NFP_PCIE_BAR_PCIE2CPP_MapType_of(bar->barcfg);
179 }
180 
181 static resource_size_t nfp_bar_resource_len(struct nfp_bar *bar)
182 {
183 	return pci_resource_len(bar->nfp->pdev, (bar->index / 8) * 2) / 8;
184 }
185 
186 static resource_size_t nfp_bar_resource_start(struct nfp_bar *bar)
187 {
188 	return pci_resource_start(bar->nfp->pdev, (bar->index / 8) * 2)
189 		+ nfp_bar_resource_len(bar) * (bar->index & 7);
190 }
191 
192 #define TARGET_WIDTH_32    4
193 #define TARGET_WIDTH_64    8
194 
/* Derive the expansion BAR config word and CPP base for a window.
 *
 * @bar_config and @bar_base are optional outputs (either may be NULL),
 * which lets callers probe whether a window fits without committing.
 * @act selects the mapping type: NFP_CPP_ACTION_RW or 0 yields a bulk
 * mapping, any other action a fixed mapping with that action encoded.
 *
 * Return: 0 on success; -EINVAL if the target is out of range, the
 * width is unsupported, the window straddles the BAR aperture, or the
 * BAR is too small for the mapping granularity.
 */
static int
compute_bar(const struct nfp6000_pcie *nfp, const struct nfp_bar *bar,
	    u32 *bar_config, u64 *bar_base,
	    int tgt, int act, int tok, u64 offset, size_t size, int width)
{
	int bitsize;
	u32 newcfg;

	if (tgt >= NFP_CPP_NUM_TARGETS)
		return -EINVAL;

	/* Encode the access width in the LengthSelect field */
	switch (width) {
	case 8:
		newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
			NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT);
		break;
	case 4:
		newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
			NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT);
		break;
	case 0:
		newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
			NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE);
		break;
	default:
		return -EINVAL;
	}

	if (act != NFP_CPP_ACTION_RW && act != 0) {
		/* Fixed CPP mapping with specific action */
		u64 mask = ~(NFP_PCIE_P2C_FIXED_SIZE(bar) - 1);

		newcfg |= NFP_PCIE_BAR_PCIE2CPP_MapType(
			  NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(tgt);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress(act);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(tok);

		/* The whole window must fall within one BAR aperture */
		if ((offset & mask) != ((offset + size - 1) & mask))
			return -EINVAL;
		offset &= mask;

		/* In-aperture offset bits of a fixed mapping */
		bitsize = 40 - 16;
	} else {
		u64 mask = ~(NFP_PCIE_P2C_BULK_SIZE(bar) - 1);

		/* Bulk mapping */
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_BULK);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(tgt);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(tok);

		/* The whole window must fall within one BAR aperture */
		if ((offset & mask) != ((offset + size - 1) & mask))
			return -EINVAL;

		offset &= mask;

		/* In-aperture offset bits of a bulk mapping */
		bitsize = 40 - 21;
	}

	/* The BAR aperture must cover the mapping granularity */
	if (bar->bitsize < bitsize)
		return -EINVAL;

	/* Remaining high bits of the CPP address become the base */
	newcfg |= offset >> bitsize;

	if (bar_base)
		*bar_base = offset;

	if (bar_config)
		*bar_config = newcfg;

	return 0;
}
268 
/* Program a new config word into a BAR's expansion BAR register.
 *
 * Prefers the memory-mapped CSR window (set up by enable_bars()) with
 * a readback to flush the posted write; falls back to PCI config
 * space before/without that mapping.  The value is cached in
 * bar->barcfg for later matching.  Always returns 0.
 */
static int
nfp6000_bar_write(struct nfp6000_pcie *nfp, struct nfp_bar *bar, u32 newcfg)
{
	int base, slot;
	int xbar;

	base = bar->index >> 3;		/* PCI BAR number */
	slot = bar->index & 7;		/* slot within that PCI BAR */

	if (nfp->iomem.csr) {
		xbar = NFP_PCIE_CPP_BAR_PCIETOCPPEXPANSIONBAR(base, slot);
		writel(newcfg, nfp->iomem.csr + xbar);
		/* Readback to ensure BAR is flushed */
		readl(nfp->iomem.csr + xbar);
	} else {
		xbar = NFP_PCIE_CFG_BAR_PCIETOCPPEXPANSIONBAR(base, slot);
		pci_write_config_dword(nfp->pdev, xbar, newcfg);
	}

	bar->barcfg = newcfg;

	return 0;
}
292 
293 static int
294 reconfigure_bar(struct nfp6000_pcie *nfp, struct nfp_bar *bar,
295 		int tgt, int act, int tok, u64 offset, size_t size, int width)
296 {
297 	u64 newbase;
298 	u32 newcfg;
299 	int err;
300 
301 	err = compute_bar(nfp, bar, &newcfg, &newbase,
302 			  tgt, act, tok, offset, size, width);
303 	if (err)
304 		return err;
305 
306 	bar->base = newbase;
307 
308 	return nfp6000_bar_write(nfp, bar, newcfg);
309 }
310 
/* Check if BAR can be used with the given parameters.
 *
 * Decodes the cached config word and normalizes it per mapping type
 * so it can be compared with the request: TARGET mappings cover any
 * token, TARGET and BULK mappings only the RW action (a requested
 * action of 0 is upgraded to RW).  Explicit mappings never match.
 *
 * Return: 1 when the BAR's current window covers the request, else 0.
 */
static int matching_bar(struct nfp_bar *bar, u32 tgt, u32 act, u32 tok,
			u64 offset, size_t size, int width)
{
	int bartgt, baract, bartok;
	int barwidth;
	u32 maptype;

	maptype = NFP_PCIE_BAR_PCIE2CPP_MapType_of(bar->barcfg);
	bartgt = NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress_of(bar->barcfg);
	bartok = NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress_of(bar->barcfg);
	baract = NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress_of(bar->barcfg);

	/* Translate the LengthSelect encoding back to an access width */
	barwidth = NFP_PCIE_BAR_PCIE2CPP_LengthSelect_of(bar->barcfg);
	switch (barwidth) {
	case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT:
		barwidth = 4;
		break;
	case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT:
		barwidth = 8;
		break;
	case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE:
		barwidth = 0;
		break;
	default:
		barwidth = -1;	/* never matches a valid request */
		break;
	}

	switch (maptype) {
	case NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET:
		bartok = -1;	/* TARGET mappings cover every token */
		fallthrough;
	case NFP_PCIE_BAR_PCIE2CPP_MapType_BULK:
		baract = NFP_CPP_ACTION_RW;
		if (act == 0)
			act = NFP_CPP_ACTION_RW;
		fallthrough;
	case NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED:
		break;
	default:
		/* We don't match explicit bars through the area interface */
		return 0;
	}

	/* Make sure to match up the width */
	if (barwidth != width)
		return 0;

	/* Target/token wildcards, exact action, and full containment */
	if ((bartgt < 0 || bartgt == tgt) &&
	    (bartok < 0 || bartok == tok) &&
	    (baract == act) &&
	    bar->base <= offset &&
	    (bar->base + (1 << bar->bitsize)) >= (offset + size))
		return 1;

	/* No match */
	return 0;
}
370 
371 static int
372 find_matching_bar(struct nfp6000_pcie *nfp,
373 		  u32 tgt, u32 act, u32 tok, u64 offset, size_t size, int width)
374 {
375 	int n;
376 
377 	for (n = 0; n < nfp->bars; n++) {
378 		struct nfp_bar *bar = &nfp->bar[n];
379 
380 		if (matching_bar(bar, tgt, act, tok, offset, size, width))
381 			return n;
382 	}
383 
384 	return -1;
385 }
386 
/* Find an idle BAR that could be reconfigured for the given window.
 *
 * Return: index of a free, fitting BAR; -EAGAIN if every fitting BAR
 * is currently in use; -EINVAL (after a WARN) when no BAR can express
 * the request at all.  Callers hold bar_lock.
 */
static int
find_unused_bar_noblock(const struct nfp6000_pcie *nfp,
			int tgt, int act, int tok,
			u64 offset, size_t size, int width)
{
	int n, busy = 0;

	for (n = 0; n < nfp->bars; n++) {
		const struct nfp_bar *bar = &nfp->bar[n];
		int err;

		/* Skip BARs without a usable aperture */
		if (!bar->bitsize)
			continue;

		/* Just check to see if we can make it fit... */
		err = compute_bar(nfp, bar, NULL, NULL,
				  tgt, act, tok, offset, size, width);
		if (err)
			continue;

		/* Fits and nobody is using it - take this one */
		if (!atomic_read(&bar->refcnt))
			return n;

		busy++;
	}

	if (WARN(!busy, "No suitable BAR found for request tgt:0x%x act:0x%x tok:0x%x off:0x%llx size:%zd width:%d\n",
		 tgt, act, tok, offset, size, width))
		return -EINVAL;

	return -EAGAIN;
}
420 
/* Lock bar_lock and search for an idle BAR that fits the request.
 *
 * On success (n >= 0) this returns with bar_lock still held; the
 * __release() is a sparse-only annotation balancing the conditional
 * unlock for static analysis.  On failure the lock is dropped before
 * returning (-EAGAIN when all fitting BARs are busy).
 */
static int
find_unused_bar_and_lock(struct nfp6000_pcie *nfp,
			 int tgt, int act, int tok,
			 u64 offset, size_t size, int width)
{
	unsigned long flags;
	int n;

	spin_lock_irqsave(&nfp->bar_lock, flags);

	n = find_unused_bar_noblock(nfp, tgt, act, tok, offset, size, width);
	if (n < 0)
		spin_unlock_irqrestore(&nfp->bar_lock, flags);
	else
		__release(&nfp->bar_lock);

	return n;
}
439 
/* Take a reference on a BAR; a non-zero refcnt pins its configuration. */
static void nfp_bar_get(struct nfp6000_pcie *nfp, struct nfp_bar *bar)
{
	atomic_inc(&bar->refcnt);
}
444 
445 static void nfp_bar_put(struct nfp6000_pcie *nfp, struct nfp_bar *bar)
446 {
447 	if (atomic_dec_and_test(&bar->refcnt))
448 		wake_up_interruptible(&nfp->bar_waiters);
449 }
450 
/* Sleep until a BAR that fits the request becomes free.
 *
 * On a 0 return, *barnum holds the result of the final
 * find_unused_bar_and_lock() call: a BAR index (with bar_lock held)
 * or -EINVAL when no BAR can ever fit.  Returns -ERESTARTSYS when
 * the wait is interrupted by a signal.
 */
static int
nfp_wait_for_bar(struct nfp6000_pcie *nfp, int *barnum,
		 u32 tgt, u32 act, u32 tok, u64 offset, size_t size, int width)
{
	return wait_event_interruptible(nfp->bar_waiters,
		(*barnum = find_unused_bar_and_lock(nfp, tgt, act, tok,
						    offset, size, width))
					!= -EAGAIN);
}
460 
461 static int
462 nfp_alloc_bar(struct nfp6000_pcie *nfp,
463 	      u32 tgt, u32 act, u32 tok,
464 	      u64 offset, size_t size, int width, int nonblocking)
465 {
466 	unsigned long irqflags;
467 	int barnum, retval;
468 
469 	if (size > (1 << 24))
470 		return -EINVAL;
471 
472 	spin_lock_irqsave(&nfp->bar_lock, irqflags);
473 	barnum = find_matching_bar(nfp, tgt, act, tok, offset, size, width);
474 	if (barnum >= 0) {
475 		/* Found a perfect match. */
476 		nfp_bar_get(nfp, &nfp->bar[barnum]);
477 		spin_unlock_irqrestore(&nfp->bar_lock, irqflags);
478 		return barnum;
479 	}
480 
481 	barnum = find_unused_bar_noblock(nfp, tgt, act, tok,
482 					 offset, size, width);
483 	if (barnum < 0) {
484 		if (nonblocking)
485 			goto err_nobar;
486 
487 		/* Wait until a BAR becomes available.  The
488 		 * find_unused_bar function will reclaim the bar_lock
489 		 * if a free BAR is found.
490 		 */
491 		spin_unlock_irqrestore(&nfp->bar_lock, irqflags);
492 		retval = nfp_wait_for_bar(nfp, &barnum, tgt, act, tok,
493 					  offset, size, width);
494 		if (retval)
495 			return retval;
496 		__acquire(&nfp->bar_lock);
497 	}
498 
499 	nfp_bar_get(nfp, &nfp->bar[barnum]);
500 	retval = reconfigure_bar(nfp, &nfp->bar[barnum],
501 				 tgt, act, tok, offset, size, width);
502 	if (retval < 0) {
503 		nfp_bar_put(nfp, &nfp->bar[barnum]);
504 		barnum = retval;
505 	}
506 
507 err_nobar:
508 	spin_unlock_irqrestore(&nfp->bar_lock, irqflags);
509 	return barnum;
510 }
511 
512 static void disable_bars(struct nfp6000_pcie *nfp);
513 
514 static int bar_cmp(const void *aptr, const void *bptr)
515 {
516 	const struct nfp_bar *a = aptr, *b = bptr;
517 
518 	if (a->bitsize == b->bitsize)
519 		return a->index - b->index;
520 	else
521 		return a->bitsize - b->bitsize;
522 }
523 
524 /* Map all PCI bars and fetch the actual BAR configurations from the
525  * board.  We assume that the BAR with the PCIe config block is
526  * already mapped.
527  *
528  * BAR0.0: Reserved for General Mapping (for MSI-X access to PCIe SRAM)
529  * BAR0.1: Reserved for XPB access (for MSI-X access to PCIe PBA)
530  * BAR0.2: --
531  * BAR0.3: --
532  * BAR0.4: Reserved for Explicit 0.0-0.3 access
533  * BAR0.5: Reserved for Explicit 1.0-1.3 access
534  * BAR0.6: Reserved for Explicit 2.0-2.3 access
535  * BAR0.7: Reserved for Explicit 3.0-3.3 access
536  *
537  * BAR1.0-BAR1.7: --
538  * BAR2.0-BAR2.7: --
539  */
static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
{
	/* Canned BAR configs for the reserved slots */
	const u32 barcfg_msix_general =
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) |
		NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT;
	const u32 barcfg_msix_xpb =
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_BULK) |
		NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT |
		NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(
			NFP_CPP_TARGET_ISLAND_XPB);
	const u32 barcfg_explicit[4] = {
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT0),
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT1),
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT2),
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3),
	};
	char status_msg[196] = {};
	int i, err, bars_free;
	struct nfp_bar *bar;
	int expl_groups;
	char *msg, *end;

	msg = status_msg +
		snprintf(status_msg, sizeof(status_msg) - 1, "RESERVED BARs: ");
	end = status_msg + sizeof(status_msg) - 1;

	/* Build the BAR slot table from the device's MEM resources */
	bar = &nfp->bar[0];
	for (i = 0; i < ARRAY_SIZE(nfp->bar); i++, bar++) {
		struct resource *res;

		res = &nfp->pdev->resource[(i >> 3) * 2];

		/* Skip over BARs that are not IORESOURCE_MEM */
		if (!(resource_type(res) & IORESOURCE_MEM)) {
			bar--;	/* compact the table: reuse this entry */
			continue;
		}

		bar->resource = res;
		bar->barcfg = 0;

		bar->nfp = nfp;
		bar->index = i;
		bar->mask = nfp_bar_resource_len(bar) - 1;
		bar->bitsize = fls(bar->mask);
		bar->base = 0;
		bar->iomem = NULL;
	}

	/* bar now points one past the last populated entry */
	nfp->bars = bar - &nfp->bar[0];
	if (nfp->bars < 8) {
		/* We need at least all 8 slots of the first PCI BAR */
		dev_err(nfp->dev, "No usable BARs found!\n");
		return -EINVAL;
	}

	bars_free = nfp->bars;

	/* Convert unit ID (0..3) to signal master/data master ID (0x40..0x70)
	 */
	mutex_init(&nfp->expl.mutex);

	nfp->expl.master_id = ((NFP_CPP_INTERFACE_UNIT_of(interface) & 3) + 4)
		<< 4;
	nfp->expl.signal_ref = 0x10;

	/* Configure, and lock, BAR0.0 for General Target use (MSI-X SRAM) */
	bar = &nfp->bar[0];
	if (nfp_bar_resource_len(bar) >= NFP_PCI_MIN_MAP_SIZE)
		bar->iomem = ioremap(nfp_bar_resource_start(bar),
					     nfp_bar_resource_len(bar));
	if (bar->iomem) {
		int pf;

		msg += scnprintf(msg, end - msg, "0.0: General/MSI-X SRAM, ");
		atomic_inc(&bar->refcnt);
		bars_free--;

		nfp6000_bar_write(nfp, bar, barcfg_msix_general);

		nfp->expl.data = bar->iomem + NFP_PCIE_SRAM + 0x1000;

		/* The expansion BAR CSR block is per-PF on the NFP3800,
		 * at a fixed location on the older chips.
		 */
		switch (nfp->pdev->device) {
		case PCI_DEVICE_ID_NETRONOME_NFP3800:
			pf = nfp->pdev->devfn & 7;
			nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf);
			break;
		case PCI_DEVICE_ID_NETRONOME_NFP4000:
		case PCI_DEVICE_ID_NETRONOME_NFP5000:
		case PCI_DEVICE_ID_NETRONOME_NFP6000:
			nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0);
			break;
		default:
			dev_err(nfp->dev, "Unsupported device ID: %04hx!\n",
				nfp->pdev->device);
			err = -EINVAL;
			goto err_unmap_bar0;
		}
		nfp->iomem.em = bar->iomem + NFP_PCIE_EM;
	}

	/* The NFP3800 has a single explicit group; the older chips 4 */
	switch (nfp->pdev->device) {
	case PCI_DEVICE_ID_NETRONOME_NFP3800:
		expl_groups = 1;
		break;
	case PCI_DEVICE_ID_NETRONOME_NFP4000:
	case PCI_DEVICE_ID_NETRONOME_NFP5000:
	case PCI_DEVICE_ID_NETRONOME_NFP6000:
		expl_groups = 4;
		break;
	default:
		dev_err(nfp->dev, "Unsupported device ID: %04hx!\n",
			nfp->pdev->device);
		err = -EINVAL;
		goto err_unmap_bar0;
	}

	/* Configure, and lock, BAR0.1 for PCIe XPB (MSI-X PBA) */
	bar = &nfp->bar[1];
	msg += scnprintf(msg, end - msg, "0.1: PCIe XPB/MSI-X PBA, ");
	atomic_inc(&bar->refcnt);
	bars_free--;

	nfp6000_bar_write(nfp, bar, barcfg_msix_xpb);

	/* Use BAR0.4..BAR0.7 for EXPL IO */
	for (i = 0; i < 4; i++) {
		int j;

		if (i >= NFP_PCIE_EXPLICIT_BARS || i >= expl_groups) {
			/* bitsize 0 marks the group as unavailable */
			nfp->expl.group[i].bitsize = 0;
			continue;
		}

		bar = &nfp->bar[4 + i];
		bar->iomem = ioremap(nfp_bar_resource_start(bar),
					     nfp_bar_resource_len(bar));
		if (bar->iomem) {
			msg += scnprintf(msg, end - msg,
					 "0.%d: Explicit%d, ", 4 + i, i);
			atomic_inc(&bar->refcnt);
			bars_free--;

			nfp->expl.group[i].bitsize = bar->bitsize;
			nfp->expl.group[i].addr = bar->iomem;
			nfp6000_bar_write(nfp, bar, barcfg_explicit[i]);

			for (j = 0; j < 4; j++)
				nfp->expl.group[i].free[j] = true;
		}
		nfp->iomem.expl[i] = bar->iomem;
	}

	/* Sort bars by bit size - use the smallest possible first. */
	sort(&nfp->bar[0], nfp->bars, sizeof(nfp->bar[0]),
	     bar_cmp, NULL);

	dev_info(nfp->dev, "%sfree: %d/%d\n", status_msg, bars_free, nfp->bars);

	return 0;

err_unmap_bar0:
	if (nfp->bar[0].iomem)
		iounmap(nfp->bar[0].iomem);
	return err;
}
711 
712 static void disable_bars(struct nfp6000_pcie *nfp)
713 {
714 	struct nfp_bar *bar = &nfp->bar[0];
715 	int n;
716 
717 	for (n = 0; n < nfp->bars; n++, bar++) {
718 		if (bar->iomem) {
719 			iounmap(bar->iomem);
720 			bar->iomem = NULL;
721 		}
722 	}
723 }
724 
725 /*
726  * Generic CPP bus access interface.
727  */
728 
/* Per-area state for the generic CPP area interface. */
struct nfp6000_area_priv {
	atomic_t refcnt;	/* number of outstanding acquires */

	struct nfp_bar *bar;	/* backing BAR; NULL until acquired */
	u32 bar_offset;		/* offset of the area within the BAR */

	u32 target;		/* CPP target of the area */
	u32 action;		/* CPP action */
	u32 token;		/* CPP token */
	u64 offset;		/* CPP offset */
	struct {
		int read;	/* push width in bytes (from nfp_target_pushpull) */
		int write;	/* pull width in bytes */
		int bar;	/* width used when picking/configuring the BAR */
	} width;
	size_t size;		/* size of the area in bytes */

	void __iomem *iomem;	/* kernel mapping of the window */
	phys_addr_t phys;	/* bus address of the window */
	struct resource resource;	/* describes [phys, phys + size) */
};
750 
/* Initialize an area's private state for a CPP window.
 *
 * Derives the push/pull access widths via nfp_target_pushpull(); a
 * target whose read and write widths differ (both non-zero) cannot
 * be memory-mapped and is rejected.  No BAR is allocated here --
 * that happens on first acquire.
 *
 * Return: 0 on success, -EINVAL for mixed widths, or the negative
 * error from nfp_target_pushpull().
 */
static int nfp6000_area_init(struct nfp_cpp_area *area, u32 dest,
			     unsigned long long address, unsigned long size)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
	u32 target = NFP_CPP_ID_TARGET_of(dest);
	u32 action = NFP_CPP_ID_ACTION_of(dest);
	u32 token = NFP_CPP_ID_TOKEN_of(dest);
	int pp;

	pp = nfp_target_pushpull(NFP_CPP_ID(target, action, token), address);
	if (pp < 0)
		return pp;

	priv->width.read = PUSH_WIDTH(pp);
	priv->width.write = PULL_WIDTH(pp);
	/* Mixed-width targets cannot be mapped through a BAR */
	if (priv->width.read > 0 &&
	    priv->width.write > 0 &&
	    priv->width.read != priv->width.write) {
		return -EINVAL;
	}

	/* BAR width follows whichever direction the target supports */
	if (priv->width.read > 0)
		priv->width.bar = priv->width.read;
	else
		priv->width.bar = priv->width.write;

	atomic_set(&priv->refcnt, 0);
	priv->bar = NULL;

	priv->target = target;
	priv->action = action;
	priv->token = token;
	priv->offset = address;
	priv->size = size;
	memset(&priv->resource, 0, sizeof(priv->resource));

	return 0;
}
789 
/* Nothing to do: all area state is torn down in nfp6000_area_release(). */
static void nfp6000_area_cleanup(struct nfp_cpp_area *area)
{
}
793 
/* Take one acquire reference on the area. */
static void priv_area_get(struct nfp_cpp_area *area)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	atomic_inc(&priv->refcnt);
}
800 
801 static int priv_area_put(struct nfp_cpp_area *area)
802 {
803 	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
804 
805 	if (WARN_ON(!atomic_read(&priv->refcnt)))
806 		return 0;
807 
808 	return atomic_dec_and_test(&priv->refcnt);
809 }
810 
/* Acquire hardware (BAR) backing for an area.
 *
 * The first acquire allocates a BAR covering the whole window
 * (non-blocking) and computes the area's offset within it; later
 * acquires only bump the refcount.  When the BAR itself has no
 * kernel mapping, the area window is ioremap()ed privately.
 *
 * Return: 0 on success or negative errno.
 */
static int nfp6000_area_acquire(struct nfp_cpp_area *area)
{
	struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area));
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
	int barnum, err;

	if (priv->bar) {
		/* Already allocated. */
		priv_area_get(area);
		return 0;
	}

	barnum = nfp_alloc_bar(nfp, priv->target, priv->action, priv->token,
			       priv->offset, priv->size, priv->width.bar, 1);

	if (barnum < 0) {
		err = barnum;
		goto err_alloc_bar;
	}
	priv->bar = &nfp->bar[barnum];

	/* Calculate offset into BAR. */
	if (nfp_bar_maptype(priv->bar) ==
	    NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) {
		/* General mappings subdivide the BAR by target and token */
		priv->bar_offset = priv->offset &
			(NFP_PCIE_P2C_GENERAL_SIZE(priv->bar) - 1);
		priv->bar_offset += NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(
			priv->bar, priv->target);
		priv->bar_offset += NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(
			priv->bar, priv->token);
	} else {
		priv->bar_offset = priv->offset & priv->bar->mask;
	}

	/* We don't actually try to acquire the resource area using
	 * request_resource.  This would prevent sharing the mapped
	 * BAR between multiple CPP areas and prevent us from
	 * effectively utilizing the limited amount of BAR resources.
	 */
	priv->phys = nfp_bar_resource_start(priv->bar) + priv->bar_offset;
	priv->resource.name = nfp_cpp_area_name(area);
	priv->resource.start = priv->phys;
	priv->resource.end = priv->resource.start + priv->size - 1;
	priv->resource.flags = IORESOURCE_MEM;

	/* If the bar is already mapped in, use its mapping */
	if (priv->bar->iomem)
		priv->iomem = priv->bar->iomem + priv->bar_offset;
	else
		/* Must have been too big. Sub-allocate. */
		priv->iomem = ioremap(priv->phys, priv->size);

	if (IS_ERR_OR_NULL(priv->iomem)) {
		dev_err(nfp->dev, "Can't ioremap() a %d byte region of BAR %d\n",
			(int)priv->size, priv->bar->index);
		err = !priv->iomem ? -ENOMEM : PTR_ERR(priv->iomem);
		priv->iomem = NULL;
		goto err_iomem_remap;
	}

	priv_area_get(area);
	return 0;

err_iomem_remap:
	nfp_bar_put(nfp, priv->bar);
	priv->bar = NULL;
err_alloc_bar:
	return err;
}
880 
/* Drop one acquire reference; the last put releases the backing BAR.
 *
 * iounmap() only when the mapping was a private sub-allocation made
 * in nfp6000_area_acquire() -- if the BAR itself is mapped, the area
 * merely borrowed that mapping.
 */
static void nfp6000_area_release(struct nfp_cpp_area *area)
{
	struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area));
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	/* Not the last reference yet? */
	if (!priv_area_put(area))
		return;

	if (!priv->bar->iomem)
		iounmap(priv->iomem);

	nfp_bar_put(nfp, priv->bar);

	priv->bar = NULL;
	priv->iomem = NULL;
}
897 
898 static phys_addr_t nfp6000_area_phys(struct nfp_cpp_area *area)
899 {
900 	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
901 
902 	return priv->phys;
903 }
904 
905 static void __iomem *nfp6000_area_iomem(struct nfp_cpp_area *area)
906 {
907 	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
908 
909 	return priv->iomem;
910 }
911 
static struct resource *nfp6000_area_resource(struct nfp_cpp_area *area)
{
	/* Use the BAR resource as the resource for the CPP area.
	 * This enables us to share the BAR among multiple CPP areas
	 * without resource conflicts.
	 *
	 * NOTE(review): priv->bar is only non-NULL while the area is
	 * acquired (see nfp6000_area_acquire/release).
	 */
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	return priv->bar->resource;
}
922 
/* Read @length bytes at @offset within the area into @kernel_vaddr.
 *
 * Performs raw 32- or 64-bit MMIO reads sized by the target's push
 * width; unaligned accesses are redirected to an explicit CPP
 * transaction instead.
 *
 * Return: number of bytes read, or negative errno.
 */
static int nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr,
			     unsigned long offset, unsigned int length)
{
	u64 __maybe_unused *wrptr64 = kernel_vaddr;
	const u64 __iomem __maybe_unused *rdptr64;
	struct nfp6000_area_priv *priv;
	u32 *wrptr32 = kernel_vaddr;
	const u32 __iomem *rdptr32;
	int n, width;

	priv = nfp_cpp_area_priv(area);
	rdptr64 = priv->iomem + offset;
	rdptr32 = priv->iomem + offset;

	/* Reject reads beyond the end of the area */
	if (offset + length > priv->size)
		return -EFAULT;

	width = priv->width.read;
	if (width <= 0)
		return -EINVAL;

	/* MU reads via a PCIe2CPP BAR support 32bit (and other) lengths */
	if (priv->target == (NFP_CPP_TARGET_MU & NFP_CPP_TARGET_ID_MASK) &&
	    priv->action == NFP_CPP_ACTION_RW &&
	    (offset % sizeof(u64) == 4 || length % sizeof(u64) == 4))
		width = TARGET_WIDTH_32;

	/* Unaligned? Translate to an explicit access */
	if ((priv->offset + offset) & (width - 1))
		return nfp_cpp_explicit_read(nfp_cpp_area_cpp(area),
					     NFP_CPP_ID(priv->target,
							priv->action,
							priv->token),
					     priv->offset + offset,
					     kernel_vaddr, length, width);

	/* The BAR-backed path requires an acquired area */
	if (WARN_ON(!priv->bar))
		return -EFAULT;

	switch (width) {
	case TARGET_WIDTH_32:
		if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0)
			return -EINVAL;

		for (n = 0; n < length; n += sizeof(u32))
			*wrptr32++ = __raw_readl(rdptr32++);
		return n;
#ifdef __raw_readq
	case TARGET_WIDTH_64:
		if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0)
			return -EINVAL;

		for (n = 0; n < length; n += sizeof(u64))
			*wrptr64++ = __raw_readq(rdptr64++);
		return n;
#endif
	default:
		return -EINVAL;
	}
}
983 
/* Write @length bytes from @kernel_vaddr to the area at @offset.
 *
 * Mirror of nfp6000_area_read(): raw 32- or 64-bit MMIO stores sized
 * by the target's pull width, each followed by a write barrier;
 * unaligned accesses fall back to an explicit CPP transaction.
 *
 * Return: number of bytes written, or negative errno.
 */
static int
nfp6000_area_write(struct nfp_cpp_area *area,
		   const void *kernel_vaddr,
		   unsigned long offset, unsigned int length)
{
	const u64 __maybe_unused *rdptr64 = kernel_vaddr;
	u64 __iomem __maybe_unused *wrptr64;
	const u32 *rdptr32 = kernel_vaddr;
	struct nfp6000_area_priv *priv;
	u32 __iomem *wrptr32;
	int n, width;

	priv = nfp_cpp_area_priv(area);
	wrptr64 = priv->iomem + offset;
	wrptr32 = priv->iomem + offset;

	/* Reject writes beyond the end of the area */
	if (offset + length > priv->size)
		return -EFAULT;

	width = priv->width.write;
	if (width <= 0)
		return -EINVAL;

	/* MU writes via a PCIe2CPP BAR support 32bit (and other) lengths */
	if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
	    priv->action == NFP_CPP_ACTION_RW &&
	    (offset % sizeof(u64) == 4 || length % sizeof(u64) == 4))
		width = TARGET_WIDTH_32;

	/* Unaligned? Translate to an explicit access */
	if ((priv->offset + offset) & (width - 1))
		return nfp_cpp_explicit_write(nfp_cpp_area_cpp(area),
					      NFP_CPP_ID(priv->target,
							 priv->action,
							 priv->token),
					      priv->offset + offset,
					      kernel_vaddr, length, width);

	/* The BAR-backed path requires an acquired area */
	if (WARN_ON(!priv->bar))
		return -EFAULT;

	switch (width) {
	case TARGET_WIDTH_32:
		if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0)
			return -EINVAL;

		for (n = 0; n < length; n += sizeof(u32)) {
			__raw_writel(*rdptr32++, wrptr32++);
			wmb();
		}
		return n;
#ifdef __raw_writeq
	case TARGET_WIDTH_64:
		if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0)
			return -EINVAL;

		for (n = 0; n < length; n += sizeof(u64)) {
			__raw_writeq(*rdptr64++, wrptr64++);
			wmb();
		}
		return n;
#endif
	default:
		return -EINVAL;
	}
}
1050 
/* Per-handle state for one acquired explicit transaction slot */
struct nfp6000_explicit_priv {
	struct nfp6000_pcie *nfp;	/* owning PCIe device state */
	struct {
		int group;		/* explicit BAR group index */
		int area;		/* slot index within the group */
	} bar;
	int bitsize;			/* log2 of this slot's address window */
	void __iomem *data;		/* slot's window in the explicit data BAR */
	void __iomem *addr;		/* slot's kickoff address window */
};
1061 
1062 static int nfp6000_explicit_acquire(struct nfp_cpp_explicit *expl)
1063 {
1064 	struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_explicit_cpp(expl));
1065 	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
1066 	int i, j;
1067 
1068 	mutex_lock(&nfp->expl.mutex);
1069 	for (i = 0; i < ARRAY_SIZE(nfp->expl.group); i++) {
1070 		if (!nfp->expl.group[i].bitsize)
1071 			continue;
1072 
1073 		for (j = 0; j < ARRAY_SIZE(nfp->expl.group[i].free); j++) {
1074 			u16 data_offset;
1075 
1076 			if (!nfp->expl.group[i].free[j])
1077 				continue;
1078 
1079 			priv->nfp = nfp;
1080 			priv->bar.group = i;
1081 			priv->bar.area = j;
1082 			priv->bitsize = nfp->expl.group[i].bitsize - 2;
1083 
1084 			data_offset = (priv->bar.group << 9) +
1085 				(priv->bar.area << 7);
1086 			priv->data = nfp->expl.data + data_offset;
1087 			priv->addr = nfp->expl.group[i].addr +
1088 				(priv->bar.area << priv->bitsize);
1089 			nfp->expl.group[i].free[j] = false;
1090 
1091 			mutex_unlock(&nfp->expl.mutex);
1092 			return 0;
1093 		}
1094 	}
1095 	mutex_unlock(&nfp->expl.mutex);
1096 
1097 	return -EAGAIN;
1098 }
1099 
1100 static void nfp6000_explicit_release(struct nfp_cpp_explicit *expl)
1101 {
1102 	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
1103 	struct nfp6000_pcie *nfp = priv->nfp;
1104 
1105 	mutex_lock(&nfp->expl.mutex);
1106 	nfp->expl.group[priv->bar.group].free[priv->bar.area] = true;
1107 	mutex_unlock(&nfp->expl.mutex);
1108 }
1109 
1110 static int nfp6000_explicit_put(struct nfp_cpp_explicit *expl,
1111 				const void *buff, size_t len)
1112 {
1113 	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
1114 	const u32 *src = buff;
1115 	size_t i;
1116 
1117 	for (i = 0; i < len; i += sizeof(u32))
1118 		writel(*(src++), priv->data + i);
1119 
1120 	return i;
1121 }
1122 
/* Program and fire one explicit CPP transaction on @expl's slot.
 *
 * Fills the slot's three BAR CSRs from @cmd and @address, then issues
 * the "kickoff" read that launches the transaction.  Returns the mask
 * of signals the caller must subsequently wait for.
 */
static int
nfp6000_explicit_do(struct nfp_cpp_explicit *expl,
		    const struct nfp_cpp_explicit_command *cmd, u64 address)
{
	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
	u8 signal_master, signal_ref, data_master;
	struct nfp6000_pcie *nfp = priv->nfp;
	int sigmask = 0;
	u16 data_ref;
	u32 csr[3];

	/* Build the mask of signals the command will raise */
	if (cmd->siga_mode)
		sigmask |= 1 << cmd->siga;
	if (cmd->sigb_mode)
		sigmask |= 1 << cmd->sigb;

	/* Default the signal master to this host interface */
	signal_master = cmd->signal_master;
	if (!signal_master)
		signal_master = nfp->expl.master_id;

	/* When signalling ourselves, derive the signal reference from
	 * the slot so each slot gets a distinct signal pair
	 */
	signal_ref = cmd->signal_ref;
	if (signal_master == nfp->expl.master_id)
		signal_ref = nfp->expl.signal_ref +
			((priv->bar.group * 4 + priv->bar.area) << 1);

	/* Default the data master to this host interface */
	data_master = cmd->data_master;
	if (!data_master)
		data_master = nfp->expl.master_id;

	/* When pushing/pulling against ourselves, point the data
	 * reference at this slot's window in the explicit data BAR
	 */
	data_ref = cmd->data_ref;
	if (data_master == nfp->expl.master_id)
		data_ref = 0x1000 +
			(priv->bar.group << 9) + (priv->bar.area << 7);

	csr[0] = NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType(sigmask) |
		NFP_PCIE_BAR_EXPLICIT_BAR0_Token(
			NFP_CPP_ID_TOKEN_of(cmd->cpp_id)) |
		NFP_PCIE_BAR_EXPLICIT_BAR0_Address(address >> 16);

	csr[1] = NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef(signal_ref) |
		NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster(data_master) |
		NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef(data_ref);

	csr[2] = NFP_PCIE_BAR_EXPLICIT_BAR2_Target(
			NFP_CPP_ID_TARGET_of(cmd->cpp_id)) |
		NFP_PCIE_BAR_EXPLICIT_BAR2_Action(
			NFP_CPP_ID_ACTION_of(cmd->cpp_id)) |
		NFP_PCIE_BAR_EXPLICIT_BAR2_Length(cmd->len) |
		NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask(cmd->byte_mask) |
		NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster(signal_master);

	/* Program the CSRs via the mapped BAR when available, otherwise
	 * through their PCI config space alias at 0x400
	 */
	if (nfp->iomem.csr) {
		writel(csr[0], nfp->iomem.csr +
		       NFP_PCIE_BAR_EXPLICIT_BAR0(priv->bar.group,
						  priv->bar.area));
		writel(csr[1], nfp->iomem.csr +
		       NFP_PCIE_BAR_EXPLICIT_BAR1(priv->bar.group,
						  priv->bar.area));
		writel(csr[2], nfp->iomem.csr +
		       NFP_PCIE_BAR_EXPLICIT_BAR2(priv->bar.group,
						  priv->bar.area));
		/* Readback to ensure BAR is flushed */
		readl(nfp->iomem.csr +
		      NFP_PCIE_BAR_EXPLICIT_BAR0(priv->bar.group,
						 priv->bar.area));
		readl(nfp->iomem.csr +
		      NFP_PCIE_BAR_EXPLICIT_BAR1(priv->bar.group,
						 priv->bar.area));
		readl(nfp->iomem.csr +
		      NFP_PCIE_BAR_EXPLICIT_BAR2(priv->bar.group,
						 priv->bar.area));
	} else {
		pci_write_config_dword(nfp->pdev, 0x400 +
				       NFP_PCIE_BAR_EXPLICIT_BAR0(
					       priv->bar.group, priv->bar.area),
				       csr[0]);

		pci_write_config_dword(nfp->pdev, 0x400 +
				       NFP_PCIE_BAR_EXPLICIT_BAR1(
					       priv->bar.group, priv->bar.area),
				       csr[1]);

		pci_write_config_dword(nfp->pdev, 0x400 +
				       NFP_PCIE_BAR_EXPLICIT_BAR2(
					       priv->bar.group, priv->bar.area),
				       csr[2]);
	}

	/* Issue the 'kickoff' transaction */
	readb(priv->addr + (address & ((1 << priv->bitsize) - 1)));

	return sigmask;
}
1216 
1217 static int nfp6000_explicit_get(struct nfp_cpp_explicit *expl,
1218 				void *buff, size_t len)
1219 {
1220 	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
1221 	u32 *dst = buff;
1222 	size_t i;
1223 
1224 	for (i = 0; i < len; i += sizeof(u32))
1225 		*(dst++) = readl(priv->data + i);
1226 
1227 	return i;
1228 }
1229 
1230 static int nfp6000_init(struct nfp_cpp *cpp)
1231 {
1232 	nfp_cpp_area_cache_add(cpp, SZ_64K);
1233 	nfp_cpp_area_cache_add(cpp, SZ_64K);
1234 	nfp_cpp_area_cache_add(cpp, SZ_256K);
1235 
1236 	return 0;
1237 }
1238 
/* CPP bus teardown hook: release the BARs, then free the per-device
 * state allocated in nfp_cpp_from_nfp6000_pcie().
 */
static void nfp6000_free(struct nfp_cpp *cpp)
{
	struct nfp6000_pcie *nfp = nfp_cpp_priv(cpp);

	disable_bars(nfp);
	kfree(nfp);
}
1246 
1247 static int nfp6000_read_serial(struct device *dev, u8 *serial)
1248 {
1249 	struct pci_dev *pdev = to_pci_dev(dev);
1250 	u64 dsn;
1251 
1252 	dsn = pci_get_dsn(pdev);
1253 	if (!dsn) {
1254 		dev_err(dev, "can't find PCIe Serial Number Capability\n");
1255 		return -EINVAL;
1256 	}
1257 
1258 	put_unaligned_be32((u32)(dsn >> 32), serial);
1259 	put_unaligned_be16((u16)(dsn >> 16), serial + 4);
1260 
1261 	return 0;
1262 }
1263 
1264 static int nfp6000_get_interface(struct device *dev)
1265 {
1266 	struct pci_dev *pdev = to_pci_dev(dev);
1267 	u64 dsn;
1268 
1269 	dsn = pci_get_dsn(pdev);
1270 	if (!dsn) {
1271 		dev_err(dev, "can't find PCIe Serial Number Capability\n");
1272 		return -EINVAL;
1273 	}
1274 
1275 	return dsn & 0xffff;
1276 }
1277 
/* CPP bus operations backed by the NFP6000 PCIe transport */
static const struct nfp_cpp_operations nfp6000_pcie_ops = {
	.owner			= THIS_MODULE,

	.init			= nfp6000_init,
	.free			= nfp6000_free,

	.read_serial		= nfp6000_read_serial,
	.get_interface		= nfp6000_get_interface,

	.area_priv_size		= sizeof(struct nfp6000_area_priv),
	.area_init		= nfp6000_area_init,
	.area_cleanup		= nfp6000_area_cleanup,
	.area_acquire		= nfp6000_area_acquire,
	.area_release		= nfp6000_area_release,
	.area_phys		= nfp6000_area_phys,
	.area_iomem		= nfp6000_area_iomem,
	.area_resource		= nfp6000_area_resource,
	.area_read		= nfp6000_area_read,
	.area_write		= nfp6000_area_write,

	.explicit_priv_size	= sizeof(struct nfp6000_explicit_priv),
	.explicit_acquire	= nfp6000_explicit_acquire,
	.explicit_release	= nfp6000_explicit_release,
	.explicit_put		= nfp6000_explicit_put,
	.explicit_do		= nfp6000_explicit_do,
	.explicit_get		= nfp6000_explicit_get,
};
1305 
1306 /**
1307  * nfp_cpp_from_nfp6000_pcie() - Build a NFP CPP bus from a NFP6000 PCI device
1308  * @pdev:	NFP6000 PCI device
1309  *
1310  * Return: NFP CPP handle
1311  */
1312 struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev)
1313 {
1314 	struct nfp6000_pcie *nfp;
1315 	u16 interface;
1316 	int err;
1317 
1318 	/*  Finished with card initialization. */
1319 	dev_info(&pdev->dev,
1320 		 "Netronome Flow Processor NFP4000/NFP5000/NFP6000 PCIe Card Probe\n");
1321 	pcie_print_link_status(pdev);
1322 
1323 	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
1324 	if (!nfp) {
1325 		err = -ENOMEM;
1326 		goto err_ret;
1327 	}
1328 
1329 	nfp->dev = &pdev->dev;
1330 	nfp->pdev = pdev;
1331 	init_waitqueue_head(&nfp->bar_waiters);
1332 	spin_lock_init(&nfp->bar_lock);
1333 
1334 	interface = nfp6000_get_interface(&pdev->dev);
1335 
1336 	if (NFP_CPP_INTERFACE_TYPE_of(interface) !=
1337 	    NFP_CPP_INTERFACE_TYPE_PCI) {
1338 		dev_err(&pdev->dev,
1339 			"Interface type %d is not the expected %d\n",
1340 			NFP_CPP_INTERFACE_TYPE_of(interface),
1341 			NFP_CPP_INTERFACE_TYPE_PCI);
1342 		err = -ENODEV;
1343 		goto err_free_nfp;
1344 	}
1345 
1346 	if (NFP_CPP_INTERFACE_CHANNEL_of(interface) !=
1347 	    NFP_CPP_INTERFACE_CHANNEL_PEROPENER) {
1348 		dev_err(&pdev->dev, "Interface channel %d is not the expected %d\n",
1349 			NFP_CPP_INTERFACE_CHANNEL_of(interface),
1350 			NFP_CPP_INTERFACE_CHANNEL_PEROPENER);
1351 		err = -ENODEV;
1352 		goto err_free_nfp;
1353 	}
1354 
1355 	err = enable_bars(nfp, interface);
1356 	if (err)
1357 		goto err_free_nfp;
1358 
1359 	/* Probe for all the common NFP devices */
1360 	return nfp_cpp_from_operations(&nfp6000_pcie_ops, &pdev->dev, nfp);
1361 
1362 err_free_nfp:
1363 	kfree(nfp);
1364 err_ret:
1365 	dev_err(&pdev->dev, "NFP6000 PCI setup failed\n");
1366 	return ERR_PTR(err);
1367 }
1368