1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2015-2018 Netronome Systems, Inc. */
3 
4 /*
5  * nfp6000_pcie.c
6  * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
7  *          Jason McMullan <jason.mcmullan@netronome.com>
8  *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
9  *
10  * Multiplexes the NFP BARs between NFP internal resources and
11  * implements the PCIe specific interface for generic CPP bus access.
12  *
13  * The BARs are managed with refcounts and are allocated/acquired
14  * using target, token and offset/size matching.  The generic CPP bus
15  * abstraction builds upon this BAR interface.
16  */
17 
18 #include <asm/unaligned.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/kref.h>
22 #include <linux/io.h>
23 #include <linux/delay.h>
24 #include <linux/interrupt.h>
25 #include <linux/sort.h>
26 #include <linux/sched.h>
27 #include <linux/types.h>
28 #include <linux/pci.h>
29 
30 #include "nfp_cpp.h"
31 #include "nfp_dev.h"
32 
33 #include "nfp6000/nfp6000.h"
34 
35 #include "nfp6000_pcie.h"
36 
37 #define NFP_PCIE_BAR(_pf)	(0x30000 + ((_pf) & 7) * 0xc0)
38 #define NFP_PCIE_BAR_EXPLICIT_BAR0(_x, _y) \
39 	(0x00000080 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3)))
40 #define   NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType(_x)     (((_x) & 0x3) << 30)
41 #define   NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType_of(_x)  (((_x) >> 30) & 0x3)
42 #define   NFP_PCIE_BAR_EXPLICIT_BAR0_Token(_x)          (((_x) & 0x3) << 28)
43 #define   NFP_PCIE_BAR_EXPLICIT_BAR0_Token_of(_x)       (((_x) >> 28) & 0x3)
44 #define   NFP_PCIE_BAR_EXPLICIT_BAR0_Address(_x)        (((_x) & 0xffffff) << 0)
45 #define   NFP_PCIE_BAR_EXPLICIT_BAR0_Address_of(_x)     (((_x) >> 0) & 0xffffff)
46 #define NFP_PCIE_BAR_EXPLICIT_BAR1(_x, _y) \
47 	(0x00000084 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3)))
48 #define   NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef(_x)      (((_x) & 0x7f) << 24)
49 #define   NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef_of(_x)   (((_x) >> 24) & 0x7f)
50 #define   NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster(_x)     (((_x) & 0x3ff) << 14)
51 #define   NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster_of(_x)  (((_x) >> 14) & 0x3ff)
52 #define   NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef(_x)        (((_x) & 0x3fff) << 0)
53 #define   NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef_of(_x)     (((_x) >> 0) & 0x3fff)
54 #define NFP_PCIE_BAR_EXPLICIT_BAR2(_x, _y) \
55 	(0x00000088 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3)))
56 #define   NFP_PCIE_BAR_EXPLICIT_BAR2_Target(_x)         (((_x) & 0xf) << 28)
57 #define   NFP_PCIE_BAR_EXPLICIT_BAR2_Target_of(_x)      (((_x) >> 28) & 0xf)
58 #define   NFP_PCIE_BAR_EXPLICIT_BAR2_Action(_x)         (((_x) & 0x1f) << 23)
59 #define   NFP_PCIE_BAR_EXPLICIT_BAR2_Action_of(_x)      (((_x) >> 23) & 0x1f)
60 #define   NFP_PCIE_BAR_EXPLICIT_BAR2_Length(_x)         (((_x) & 0x1f) << 18)
61 #define   NFP_PCIE_BAR_EXPLICIT_BAR2_Length_of(_x)      (((_x) >> 18) & 0x1f)
62 #define   NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask(_x)       (((_x) & 0xff) << 10)
63 #define   NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask_of(_x)    (((_x) >> 10) & 0xff)
64 #define   NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster(_x)   (((_x) & 0x3ff) << 0)
65 #define   NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster_of(_x) (((_x) >> 0) & 0x3ff)
66 
67 #define   NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress(_x)  (((_x) & 0x1f) << 16)
68 #define   NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress_of(_x) (((_x) >> 16) & 0x1f)
69 #define   NFP_PCIE_BAR_PCIE2CPP_BaseAddress(_x)         (((_x) & 0xffff) << 0)
70 #define   NFP_PCIE_BAR_PCIE2CPP_BaseAddress_of(_x)      (((_x) >> 0) & 0xffff)
71 #define   NFP_PCIE_BAR_PCIE2CPP_LengthSelect(_x)        (((_x) & 0x3) << 27)
72 #define   NFP_PCIE_BAR_PCIE2CPP_LengthSelect_of(_x)     (((_x) >> 27) & 0x3)
73 #define     NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT    0
74 #define     NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT    1
75 #define     NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE    3
76 #define   NFP_PCIE_BAR_PCIE2CPP_MapType(_x)             (((_x) & 0x7) << 29)
77 #define   NFP_PCIE_BAR_PCIE2CPP_MapType_of(_x)          (((_x) >> 29) & 0x7)
78 #define     NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED         0
79 #define     NFP_PCIE_BAR_PCIE2CPP_MapType_BULK          1
80 #define     NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET        2
81 #define     NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL       3
82 #define     NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT0     4
83 #define     NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT1     5
84 #define     NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT2     6
85 #define     NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3     7
86 #define   NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(_x)  (((_x) & 0xf) << 23)
87 #define   NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress_of(_x) (((_x) >> 23) & 0xf)
88 #define   NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(_x)   (((_x) & 0x3) << 21)
89 #define   NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress_of(_x) (((_x) >> 21) & 0x3)
90 #define NFP_PCIE_EM                                     0x020000
91 #define NFP_PCIE_SRAM                                   0x000000
92 
93 /* Minimal size of the PCIe cfg memory we depend on being mapped,
94  * queue controller and DMA controller don't have to be covered.
95  */
96 #define NFP_PCI_MIN_MAP_SIZE				0x080000
97 
98 #define NFP_PCIE_P2C_FIXED_SIZE(bar)               (1 << (bar)->bitsize)
99 #define NFP_PCIE_P2C_BULK_SIZE(bar)                (1 << (bar)->bitsize)
100 #define NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(bar, x) ((x) << ((bar)->bitsize - 2))
101 #define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4))
102 #define NFP_PCIE_P2C_GENERAL_SIZE(bar)             (1 << ((bar)->bitsize - 4))
103 
104 #define NFP_PCIE_P2C_EXPBAR_OFFSET(bar_index)		((bar_index) * 4)
105 
106 /* The number of explicit BARs to reserve.
107  * Minimum is 0, maximum is 4 on the NFP6000.
108  * The NFP3800 can have only one per PF.
109  */
110 #define NFP_PCIE_EXPLICIT_BARS		2
111 
112 struct nfp6000_pcie;
113 struct nfp6000_area_priv;
114 
/**
 * struct nfp_bar - describes BAR configuration and usage
 * @nfp:	backlink to owner
 * @barcfg:	cached contents of BAR config CSR
 * @base:	the BAR's base CPP offset
 * @mask:       mask for the BAR aperture (read only)
 * @bitsize:	bitsize of BAR aperture (read only)
 * @index:	index of the BAR
 * @refcnt:	number of current users; 0 means the BAR is idle and
 *		may be reprogrammed for a new mapping
 * @iomem:	mapped IO memory
 * @resource:	iomem resource window
 */
struct nfp_bar {
	struct nfp6000_pcie *nfp;
	u32 barcfg;
	u64 base;          /* CPP address base */
	u64 mask;          /* Bit mask of the bar */
	u32 bitsize;       /* Bit size of the bar */
	int index;
	atomic_t refcnt;

	void __iomem *iomem;
	struct resource *resource;
};
139 
140 #define NFP_PCI_BAR_MAX    (PCI_64BIT_BAR_COUNT * 8)
141 
/* Per-device state for the NFP6000-family PCIe CPP transport.
 *
 * The BAR cache (@bar/@bars, guarded by @bar_lock) tracks how each
 * PCIe sub-BAR is currently programmed; @bar_waiters is woken when a
 * BAR's refcount drops to zero.  @iomem holds the mappings of the
 * reserved BARs set up by enable_bars(); @expl tracks the 4x4
 * explicit access areas handed out by nfp6000_explicit_acquire().
 */
struct nfp6000_pcie {
	struct pci_dev *pdev;
	struct device *dev;
	const struct nfp_dev_info *dev_info;

	/* PCI BAR management */
	spinlock_t bar_lock;		/* Protect the PCI2CPP BAR cache */
	int bars;
	struct nfp_bar bar[NFP_PCI_BAR_MAX];
	wait_queue_head_t bar_waiters;

	/* Reserved BAR access */
	struct {
		void __iomem *csr;	/* expansion BAR config CSRs */
		void __iomem *em;	/* event manager */
		void __iomem *expl[4];	/* explicit access BAR mappings */
	} iomem;

	/* Explicit IO access */
	struct {
		struct mutex mutex; /* Lock access to this explicit group */
		u8 master_id;
		u8 signal_ref;
		void __iomem *data;
		struct {
			void __iomem *addr;
			int bitsize;
			int free[4];	/* per-area free flags */
		} group[4];
	} expl;
};
173 
174 static u32 nfp_bar_maptype(struct nfp_bar *bar)
175 {
176 	return NFP_PCIE_BAR_PCIE2CPP_MapType_of(bar->barcfg);
177 }
178 
179 static resource_size_t nfp_bar_resource_len(struct nfp_bar *bar)
180 {
181 	return pci_resource_len(bar->nfp->pdev, (bar->index / 8) * 2) / 8;
182 }
183 
184 static resource_size_t nfp_bar_resource_start(struct nfp_bar *bar)
185 {
186 	return pci_resource_start(bar->nfp->pdev, (bar->index / 8) * 2)
187 		+ nfp_bar_resource_len(bar) * (bar->index & 7);
188 }
189 
190 #define TARGET_WIDTH_32    4
191 #define TARGET_WIDTH_64    8
192 
/* Compute the BAR config word and CPP base needed to map the window
 * (@tgt, @act, @tok, @offset .. @offset + @size - 1) with access
 * width @width (8, 4 or 0 bytes).
 *
 * A FIXED mapping (target, action and token all encoded in the BAR)
 * is used when a specific action other than RW/0 is requested; a BULK
 * mapping otherwise.  Returns 0 on success, filling in @bar_config
 * and @bar_base when non-NULL, or -EINVAL when the request cannot be
 * expressed (bad target or width, request crosses a window boundary,
 * or BAR aperture too small).
 */
static int
compute_bar(const struct nfp6000_pcie *nfp, const struct nfp_bar *bar,
	    u32 *bar_config, u64 *bar_base,
	    int tgt, int act, int tok, u64 offset, size_t size, int width)
{
	int bitsize;
	u32 newcfg;

	if (tgt >= NFP_CPP_NUM_TARGETS)
		return -EINVAL;

	/* Encode the LengthSelect field for the requested access width */
	switch (width) {
	case 8:
		newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
			NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT);
		break;
	case 4:
		newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
			NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT);
		break;
	case 0:
		newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
			NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE);
		break;
	default:
		return -EINVAL;
	}

	if (act != NFP_CPP_ACTION_RW && act != 0) {
		/* Fixed CPP mapping with specific action */
		u64 mask = ~(NFP_PCIE_P2C_FIXED_SIZE(bar) - 1);

		newcfg |= NFP_PCIE_BAR_PCIE2CPP_MapType(
			  NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(tgt);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress(act);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(tok);

		/* The whole request must fall within one BAR window */
		if ((offset & mask) != ((offset + size - 1) & mask))
			return -EINVAL;
		offset &= mask;

		/* 40 - 16: upper 16 bits of the CPP address come from
		 * the BAR config, the rest from the PCIe offset
		 */
		bitsize = 40 - 16;
	} else {
		u64 mask = ~(NFP_PCIE_P2C_BULK_SIZE(bar) - 1);

		/* Bulk mapping */
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_BULK);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(tgt);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(tok);

		/* The whole request must fall within one BAR window */
		if ((offset & mask) != ((offset + size - 1) & mask))
			return -EINVAL;

		offset &= mask;

		/* 40 - 21: bulk windows encode fewer base address bits */
		bitsize = 40 - 21;
	}

	if (bar->bitsize < bitsize)
		return -EINVAL;

	/* The high bits of the CPP offset become the BAR base address */
	newcfg |= offset >> bitsize;

	if (bar_base)
		*bar_base = offset;

	if (bar_config)
		*bar_config = newcfg;

	return 0;
}
266 
267 static int
268 nfp6000_bar_write(struct nfp6000_pcie *nfp, struct nfp_bar *bar, u32 newcfg)
269 {
270 	unsigned int xbar;
271 
272 	xbar = NFP_PCIE_P2C_EXPBAR_OFFSET(bar->index);
273 
274 	if (nfp->iomem.csr) {
275 		writel(newcfg, nfp->iomem.csr + xbar);
276 		/* Readback to ensure BAR is flushed */
277 		readl(nfp->iomem.csr + xbar);
278 	} else {
279 		xbar += nfp->dev_info->pcie_cfg_expbar_offset;
280 		pci_write_config_dword(nfp->pdev, xbar, newcfg);
281 	}
282 
283 	bar->barcfg = newcfg;
284 
285 	return 0;
286 }
287 
288 static int
289 reconfigure_bar(struct nfp6000_pcie *nfp, struct nfp_bar *bar,
290 		int tgt, int act, int tok, u64 offset, size_t size, int width)
291 {
292 	u64 newbase;
293 	u32 newcfg;
294 	int err;
295 
296 	err = compute_bar(nfp, bar, &newcfg, &newbase,
297 			  tgt, act, tok, offset, size, width);
298 	if (err)
299 		return err;
300 
301 	bar->base = newbase;
302 
303 	return nfp6000_bar_write(nfp, bar, newcfg);
304 }
305 
/* Check if BAR can be used with the given parameters.
 * Decodes the cached BAR config and compares it against the requested
 * target/action/token, access width and address range.  Explicit BARs
 * never match through the area interface.  Returns 1 on a usable
 * match, 0 otherwise.
 */
static int matching_bar(struct nfp_bar *bar, u32 tgt, u32 act, u32 tok,
			u64 offset, size_t size, int width)
{
	int bartgt, baract, bartok;
	int barwidth;
	u32 maptype;

	maptype = NFP_PCIE_BAR_PCIE2CPP_MapType_of(bar->barcfg);
	bartgt = NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress_of(bar->barcfg);
	bartok = NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress_of(bar->barcfg);
	baract = NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress_of(bar->barcfg);

	/* Translate the LengthSelect encoding back to a byte width */
	barwidth = NFP_PCIE_BAR_PCIE2CPP_LengthSelect_of(bar->barcfg);
	switch (barwidth) {
	case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT:
		barwidth = 4;
		break;
	case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT:
		barwidth = 8;
		break;
	case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE:
		barwidth = 0;
		break;
	default:
		barwidth = -1;
		break;
	}

	/* Normalize don't-care fields per map type: TARGET mappings
	 * ignore the token; TARGET and BULK mappings imply action RW.
	 */
	switch (maptype) {
	case NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET:
		bartok = -1;
		fallthrough;
	case NFP_PCIE_BAR_PCIE2CPP_MapType_BULK:
		baract = NFP_CPP_ACTION_RW;
		if (act == 0)
			act = NFP_CPP_ACTION_RW;
		fallthrough;
	case NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED:
		break;
	default:
		/* We don't match explicit bars through the area interface */
		return 0;
	}

	/* Make sure to match up the width */
	if (barwidth != width)
		return 0;

	/* Wildcarded target/token, exact action, and the BAR window
	 * must fully cover the requested range.
	 */
	if ((bartgt < 0 || bartgt == tgt) &&
	    (bartok < 0 || bartok == tok) &&
	    (baract == act) &&
	    bar->base <= offset &&
	    (bar->base + (1 << bar->bitsize)) >= (offset + size))
		return 1;

	/* No match */
	return 0;
}
365 
366 static int
367 find_matching_bar(struct nfp6000_pcie *nfp,
368 		  u32 tgt, u32 act, u32 tok, u64 offset, size_t size, int width)
369 {
370 	int n;
371 
372 	for (n = 0; n < nfp->bars; n++) {
373 		struct nfp_bar *bar = &nfp->bar[n];
374 
375 		if (matching_bar(bar, tgt, act, tok, offset, size, width))
376 			return n;
377 	}
378 
379 	return -1;
380 }
381 
/* Find a BAR that could be reprogrammed to satisfy the request.
 * Returns the index of an idle (refcnt == 0), suitable BAR; -EAGAIN
 * if every suitable BAR is currently in use; or -EINVAL (after a
 * WARN) when no BAR can express the mapping at all.
 */
static int
find_unused_bar_noblock(const struct nfp6000_pcie *nfp,
			int tgt, int act, int tok,
			u64 offset, size_t size, int width)
{
	int n, busy = 0;

	for (n = 0; n < nfp->bars; n++) {
		const struct nfp_bar *bar = &nfp->bar[n];
		int err;

		/* Skip BARs never initialized by enable_bars() */
		if (!bar->bitsize)
			continue;

		/* Just check to see if we can make it fit... */
		err = compute_bar(nfp, bar, NULL, NULL,
				  tgt, act, tok, offset, size, width);
		if (err)
			continue;

		if (!atomic_read(&bar->refcnt))
			return n;

		busy++;
	}

	if (WARN(!busy, "No suitable BAR found for request tgt:0x%x act:0x%x tok:0x%x off:0x%llx size:%zd width:%d\n",
		 tgt, act, tok, offset, size, width))
		return -EINVAL;

	return -EAGAIN;
}
415 
/* Take bar_lock and look for a reprogrammable BAR.  On success
 * (return >= 0) bar_lock remains held by the caller; the __release()
 * is a sparse-only annotation balancing the lock count for static
 * analysis, it does not actually drop the lock.  On failure the lock
 * is released before returning.
 */
static int
find_unused_bar_and_lock(struct nfp6000_pcie *nfp,
			 int tgt, int act, int tok,
			 u64 offset, size_t size, int width)
{
	unsigned long flags;
	int n;

	spin_lock_irqsave(&nfp->bar_lock, flags);

	n = find_unused_bar_noblock(nfp, tgt, act, tok, offset, size, width);
	if (n < 0)
		spin_unlock_irqrestore(&nfp->bar_lock, flags);
	else
		__release(&nfp->bar_lock);

	return n;
}
434 
/* Take a reference on @bar; pairs with nfp_bar_put(). */
static void nfp_bar_get(struct nfp6000_pcie *nfp, struct nfp_bar *bar)
{
	atomic_inc(&bar->refcnt);
}
439 
/* Drop a reference on @bar; when the last user goes away, wake any
 * threads blocked in nfp_wait_for_bar() so they can retry allocation.
 */
static void nfp_bar_put(struct nfp6000_pcie *nfp, struct nfp_bar *bar)
{
	if (atomic_dec_and_test(&bar->refcnt))
		wake_up_interruptible(&nfp->bar_waiters);
}
445 
/* Sleep until a BAR matching the request can be claimed.  On success
 * (return 0) *@barnum holds the BAR index and bar_lock is held, as
 * reacquired by find_unused_bar_and_lock().  Returns non-zero if the
 * wait was interrupted by a signal.
 */
static int
nfp_wait_for_bar(struct nfp6000_pcie *nfp, int *barnum,
		 u32 tgt, u32 act, u32 tok, u64 offset, size_t size, int width)
{
	return wait_event_interruptible(nfp->bar_waiters,
		(*barnum = find_unused_bar_and_lock(nfp, tgt, act, tok,
						    offset, size, width))
					!= -EAGAIN);
}
455 
/* Acquire a BAR mapping (@tgt, @act, @tok, @offset .. @offset + @size)
 * with access width @width.  Tries, in order: an already-matching BAR,
 * an idle BAR that can be reprogrammed, and finally - unless
 * @nonblocking - sleeps until a BAR frees up.  Returns the BAR index
 * with its refcount raised, or a negative errno.
 */
static int
nfp_alloc_bar(struct nfp6000_pcie *nfp,
	      u32 tgt, u32 act, u32 tok,
	      u64 offset, size_t size, int width, int nonblocking)
{
	unsigned long irqflags;
	int barnum, retval;

	/* Reject requests larger than 16MB (1 << 24) outright */
	if (size > (1 << 24))
		return -EINVAL;

	spin_lock_irqsave(&nfp->bar_lock, irqflags);
	barnum = find_matching_bar(nfp, tgt, act, tok, offset, size, width);
	if (barnum >= 0) {
		/* Found a perfect match. */
		nfp_bar_get(nfp, &nfp->bar[barnum]);
		spin_unlock_irqrestore(&nfp->bar_lock, irqflags);
		return barnum;
	}

	barnum = find_unused_bar_noblock(nfp, tgt, act, tok,
					 offset, size, width);
	if (barnum < 0) {
		if (nonblocking)
			goto err_nobar;

		/* Wait until a BAR becomes available.  The
		 * find_unused_bar function will reclaim the bar_lock
		 * if a free BAR is found.
		 */
		spin_unlock_irqrestore(&nfp->bar_lock, irqflags);
		retval = nfp_wait_for_bar(nfp, &barnum, tgt, act, tok,
					  offset, size, width);
		if (retval)
			return retval;
		/* Sparse annotation: bar_lock is held again here */
		__acquire(&nfp->bar_lock);
	}

	nfp_bar_get(nfp, &nfp->bar[barnum]);
	retval = reconfigure_bar(nfp, &nfp->bar[barnum],
				 tgt, act, tok, offset, size, width);
	if (retval < 0) {
		/* Reprogramming failed - release the BAR again */
		nfp_bar_put(nfp, &nfp->bar[barnum]);
		barnum = retval;
	}

err_nobar:
	spin_unlock_irqrestore(&nfp->bar_lock, irqflags);
	return barnum;
}
506 
507 static void disable_bars(struct nfp6000_pcie *nfp);
508 
509 static int bar_cmp(const void *aptr, const void *bptr)
510 {
511 	const struct nfp_bar *a = aptr, *b = bptr;
512 
513 	if (a->bitsize == b->bitsize)
514 		return a->index - b->index;
515 	else
516 		return a->bitsize - b->bitsize;
517 }
518 
/* Map all PCI bars and fetch the actual BAR configurations from the
 * board.  We assume that the BAR with the PCIe config block is
 * already mapped.
 *
 * BAR0.0: Reserved for General Mapping (for MSI-X access to PCIe SRAM)
 * BAR0.1: Reserved for XPB access (for MSI-X access to PCIe PBA)
 * BAR0.2: --
 * BAR0.3: --
 * BAR0.4: Reserved for Explicit 0.0-0.3 access
 * BAR0.5: Reserved for Explicit 1.0-1.3 access
 * BAR0.6: Reserved for Explicit 2.0-2.3 access
 * BAR0.7: Reserved for Explicit 3.0-3.3 access
 *
 * BAR1.0-BAR1.7: --
 * BAR2.0-BAR2.7: --
 *
 * Returns 0 on success or a negative errno; on error any mapping of
 * BAR0 is undone.
 */
static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
{
	const u32 barcfg_msix_general =
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) |
		NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT;
	const u32 barcfg_msix_xpb =
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_BULK) |
		NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT |
		NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(
			NFP_CPP_TARGET_ISLAND_XPB);
	const u32 barcfg_explicit[4] = {
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT0),
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT1),
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT2),
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3),
	};
	char status_msg[196] = {};
	int i, err, bars_free;
	struct nfp_bar *bar;
	int expl_groups;
	char *msg, *end;

	msg = status_msg +
		snprintf(status_msg, sizeof(status_msg) - 1, "RESERVED BARs: ");
	end = status_msg + sizeof(status_msg) - 1;

	/* Populate the sub-BAR bookkeeping; each usable 64-bit PCI BAR
	 * contributes 8 sub-BARs, non-MEM BARs are skipped.
	 */
	bar = &nfp->bar[0];
	for (i = 0; i < ARRAY_SIZE(nfp->bar); i++, bar++) {
		struct resource *res;

		res = &nfp->pdev->resource[(i >> 3) * 2];

		/* Skip over BARs that are not IORESOURCE_MEM */
		if (!(resource_type(res) & IORESOURCE_MEM)) {
			bar--;
			continue;
		}

		bar->resource = res;
		bar->barcfg = 0;

		bar->nfp = nfp;
		bar->index = i;
		bar->mask = nfp_bar_resource_len(bar) - 1;
		bar->bitsize = fls(bar->mask);
		bar->base = 0;
		bar->iomem = NULL;
	}

	nfp->bars = bar - &nfp->bar[0];
	if (nfp->bars < 8) {
		dev_err(nfp->dev, "No usable BARs found!\n");
		return -EINVAL;
	}

	bars_free = nfp->bars;

	/* Convert unit ID (0..3) to signal master/data master ID (0x40..0x70)
	 */
	mutex_init(&nfp->expl.mutex);

	nfp->expl.master_id = ((NFP_CPP_INTERFACE_UNIT_of(interface) & 3) + 4)
		<< 4;
	nfp->expl.signal_ref = 0x10;

	/* Configure, and lock, BAR0.0 for General Target use (MSI-X SRAM) */
	bar = &nfp->bar[0];
	if (nfp_bar_resource_len(bar) >= NFP_PCI_MIN_MAP_SIZE)
		bar->iomem = ioremap(nfp_bar_resource_start(bar),
					     nfp_bar_resource_len(bar));
	if (bar->iomem) {
		int pf;

		msg += scnprintf(msg, end - msg, "0.0: General/MSI-X SRAM, ");
		atomic_inc(&bar->refcnt);
		bars_free--;

		nfp6000_bar_write(nfp, bar, barcfg_msix_general);

		nfp->expl.data = bar->iomem + NFP_PCIE_SRAM +
			nfp->dev_info->pcie_expl_offset;

		/* The BAR config CSR window is per-PF on the NFP3800 */
		switch (nfp->pdev->device) {
		case PCI_DEVICE_ID_NFP3800:
			pf = nfp->pdev->devfn & 7;
			nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf);
			break;
		case PCI_DEVICE_ID_NFP4000:
		case PCI_DEVICE_ID_NFP5000:
		case PCI_DEVICE_ID_NFP6000:
			nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0);
			break;
		default:
			dev_err(nfp->dev, "Unsupported device ID: %04hx!\n",
				nfp->pdev->device);
			err = -EINVAL;
			goto err_unmap_bar0;
		}
		nfp->iomem.em = bar->iomem + NFP_PCIE_EM;
	}

	/* NFP3800 has only one explicit group per PF, older parts have 4 */
	switch (nfp->pdev->device) {
	case PCI_DEVICE_ID_NFP3800:
		expl_groups = 1;
		break;
	case PCI_DEVICE_ID_NFP4000:
	case PCI_DEVICE_ID_NFP5000:
	case PCI_DEVICE_ID_NFP6000:
		expl_groups = 4;
		break;
	default:
		dev_err(nfp->dev, "Unsupported device ID: %04hx!\n",
			nfp->pdev->device);
		err = -EINVAL;
		goto err_unmap_bar0;
	}

	/* Configure, and lock, BAR0.1 for PCIe XPB (MSI-X PBA) */
	bar = &nfp->bar[1];
	msg += scnprintf(msg, end - msg, "0.1: PCIe XPB/MSI-X PBA, ");
	atomic_inc(&bar->refcnt);
	bars_free--;

	nfp6000_bar_write(nfp, bar, barcfg_msix_xpb);

	/* Use BAR0.4..BAR0.7 for EXPL IO */
	for (i = 0; i < 4; i++) {
		int j;

		if (i >= NFP_PCIE_EXPLICIT_BARS || i >= expl_groups) {
			nfp->expl.group[i].bitsize = 0;
			continue;
		}

		bar = &nfp->bar[4 + i];
		bar->iomem = ioremap(nfp_bar_resource_start(bar),
					     nfp_bar_resource_len(bar));
		if (bar->iomem) {
			msg += scnprintf(msg, end - msg,
					 "0.%d: Explicit%d, ", 4 + i, i);
			atomic_inc(&bar->refcnt);
			bars_free--;

			nfp->expl.group[i].bitsize = bar->bitsize;
			nfp->expl.group[i].addr = bar->iomem;
			nfp6000_bar_write(nfp, bar, barcfg_explicit[i]);

			/* Mark all four areas of this group available */
			for (j = 0; j < 4; j++)
				nfp->expl.group[i].free[j] = true;
		}
		nfp->iomem.expl[i] = bar->iomem;
	}

	/* Sort bars by bit size - use the smallest possible first. */
	sort(&nfp->bar[0], nfp->bars, sizeof(nfp->bar[0]),
	     bar_cmp, NULL);

	dev_info(nfp->dev, "%sfree: %d/%d\n", status_msg, bars_free, nfp->bars);

	return 0;

err_unmap_bar0:
	if (nfp->bar[0].iomem)
		iounmap(nfp->bar[0].iomem);
	return err;
}
707 
708 static void disable_bars(struct nfp6000_pcie *nfp)
709 {
710 	struct nfp_bar *bar = &nfp->bar[0];
711 	int n;
712 
713 	for (n = 0; n < nfp->bars; n++, bar++) {
714 		if (bar->iomem) {
715 			iounmap(bar->iomem);
716 			bar->iomem = NULL;
717 		}
718 	}
719 }
720 
721 /*
722  * Generic CPP bus access interface.
723  */
724 
/**
 * struct nfp6000_area_priv - private state of a CPP area
 * @refcnt:	number of current users of this area
 * @bar:	BAR backing the area, NULL until acquired
 * @bar_offset:	byte offset of the area within the BAR aperture
 * @target:	CPP target ID
 * @action:	CPP action
 * @token:	CPP token
 * @offset:	CPP address of the start of the area
 * @width:	access widths: @width.read/@width.write come from the
 *		push/pull info, @width.bar is the width the BAR mapping
 *		was made with
 * @size:	size of the area in bytes
 * @iomem:	kernel mapping of the area while acquired
 * @phys:	bus address of the start of the area
 * @resource:	resource describing the area (never registered; see
 *		the comment in nfp6000_area_acquire())
 */
struct nfp6000_area_priv {
	atomic_t refcnt;

	struct nfp_bar *bar;
	u32 bar_offset;

	u32 target;
	u32 action;
	u32 token;
	u64 offset;
	struct {
		int read;
		int write;
		int bar;
	} width;
	size_t size;

	void __iomem *iomem;
	phys_addr_t phys;
	struct resource resource;
};
746 
747 static int nfp6000_area_init(struct nfp_cpp_area *area, u32 dest,
748 			     unsigned long long address, unsigned long size)
749 {
750 	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
751 	u32 target = NFP_CPP_ID_TARGET_of(dest);
752 	u32 action = NFP_CPP_ID_ACTION_of(dest);
753 	u32 token = NFP_CPP_ID_TOKEN_of(dest);
754 	int pp;
755 
756 	pp = nfp_target_pushpull(NFP_CPP_ID(target, action, token), address);
757 	if (pp < 0)
758 		return pp;
759 
760 	priv->width.read = PUSH_WIDTH(pp);
761 	priv->width.write = PULL_WIDTH(pp);
762 	if (priv->width.read > 0 &&
763 	    priv->width.write > 0 &&
764 	    priv->width.read != priv->width.write) {
765 		return -EINVAL;
766 	}
767 
768 	if (priv->width.read > 0)
769 		priv->width.bar = priv->width.read;
770 	else
771 		priv->width.bar = priv->width.write;
772 
773 	atomic_set(&priv->refcnt, 0);
774 	priv->bar = NULL;
775 
776 	priv->target = target;
777 	priv->action = action;
778 	priv->token = token;
779 	priv->offset = address;
780 	priv->size = size;
781 	memset(&priv->resource, 0, sizeof(priv->resource));
782 
783 	return 0;
784 }
785 
/* No per-area teardown is required; mappings and BAR references are
 * dropped in nfp6000_area_release().
 */
static void nfp6000_area_cleanup(struct nfp_cpp_area *area)
{
}
789 
/* Take a reference on the area's private refcount. */
static void priv_area_get(struct nfp_cpp_area *area)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	atomic_inc(&priv->refcnt);
}
796 
/* Drop a reference on the area's private refcount.  Returns non-zero
 * when this was the last reference.  An unbalanced put triggers a
 * WARN and is treated as a no-op.
 */
static int priv_area_put(struct nfp_cpp_area *area)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	if (WARN_ON(!atomic_read(&priv->refcnt)))
		return 0;

	return atomic_dec_and_test(&priv->refcnt);
}
806 
/* Attach a BAR to the area and map it.  Allocates (or reuses) a BAR
 * covering the area's CPP window, computes the area's offset within
 * the BAR aperture and establishes an iomem mapping.  The BAR is
 * requested non-blocking, so this can fail with -EAGAIN from
 * nfp_alloc_bar() rather than sleeping.  Returns 0 on success or a
 * negative errno.
 */
static int nfp6000_area_acquire(struct nfp_cpp_area *area)
{
	struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area));
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
	int barnum, err;

	if (priv->bar) {
		/* Already allocated. */
		priv_area_get(area);
		return 0;
	}

	barnum = nfp_alloc_bar(nfp, priv->target, priv->action, priv->token,
			       priv->offset, priv->size, priv->width.bar, 1);

	if (barnum < 0) {
		err = barnum;
		goto err_alloc_bar;
	}
	priv->bar = &nfp->bar[barnum];

	/* Calculate offset into BAR.  GENERAL mappings subdivide the
	 * aperture by target and token; other map types just mask the
	 * CPP offset with the BAR aperture mask.
	 */
	if (nfp_bar_maptype(priv->bar) ==
	    NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) {
		priv->bar_offset = priv->offset &
			(NFP_PCIE_P2C_GENERAL_SIZE(priv->bar) - 1);
		priv->bar_offset += NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(
			priv->bar, priv->target);
		priv->bar_offset += NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(
			priv->bar, priv->token);
	} else {
		priv->bar_offset = priv->offset & priv->bar->mask;
	}

	/* We don't actually try to acquire the resource area using
	 * request_resource.  This would prevent sharing the mapped
	 * BAR between multiple CPP areas and prevent us from
	 * effectively utilizing the limited amount of BAR resources.
	 */
	priv->phys = nfp_bar_resource_start(priv->bar) + priv->bar_offset;
	priv->resource.name = nfp_cpp_area_name(area);
	priv->resource.start = priv->phys;
	priv->resource.end = priv->resource.start + priv->size - 1;
	priv->resource.flags = IORESOURCE_MEM;

	/* If the bar is already mapped in, use its mapping */
	if (priv->bar->iomem)
		priv->iomem = priv->bar->iomem + priv->bar_offset;
	else
		/* Must have been too big. Sub-allocate. */
		priv->iomem = ioremap(priv->phys, priv->size);

	if (IS_ERR_OR_NULL(priv->iomem)) {
		dev_err(nfp->dev, "Can't ioremap() a %d byte region of BAR %d\n",
			(int)priv->size, priv->bar->index);
		err = !priv->iomem ? -ENOMEM : PTR_ERR(priv->iomem);
		priv->iomem = NULL;
		goto err_iomem_remap;
	}

	priv_area_get(area);
	return 0;

err_iomem_remap:
	nfp_bar_put(nfp, priv->bar);
	priv->bar = NULL;
err_alloc_bar:
	return err;
}
876 
/* Drop one reference to @area; on the last put, unmap any private
 * sub-allocated mapping and release the underlying BAR.
 */
static void nfp6000_area_release(struct nfp_cpp_area *area)
{
	struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area));
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	if (!priv_area_put(area))
		return;

	/* Only unmap when this area created its own mapping in
	 * nfp6000_area_acquire(); a shared whole-BAR mapping stays.
	 */
	if (!priv->bar->iomem)
		iounmap(priv->iomem);

	nfp_bar_put(nfp, priv->bar);

	priv->bar = NULL;
	priv->iomem = NULL;
}
893 
/* Return the bus/physical address of the start of the area. */
static phys_addr_t nfp6000_area_phys(struct nfp_cpp_area *area)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	return priv->phys;
}
900 
/* Return the kernel mapping of the area; set while the area is
 * acquired, NULL after release.
 */
static void __iomem *nfp6000_area_iomem(struct nfp_cpp_area *area)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	return priv->iomem;
}
907 
/* Return the resource to report for this CPP area. */
static struct resource *nfp6000_area_resource(struct nfp_cpp_area *area)
{
	/* Use the BAR resource as the resource for the CPP area.
	 * This enables us to share the BAR among multiple CPP areas
	 * without resource conflicts.
	 */
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	return priv->bar->resource;
}
918 
/* Read @length bytes at @offset within the area into @kernel_vaddr.
 * I/O goes through the BAR mapping using the area's push width (32 or
 * 64 bit); accesses unaligned for that width are bounced through the
 * explicit access interface instead.  Returns the number of bytes
 * read, or a negative errno.
 */
static int nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr,
			     unsigned long offset, unsigned int length)
{
	u64 __maybe_unused *wrptr64 = kernel_vaddr;
	const u64 __iomem __maybe_unused *rdptr64;
	struct nfp6000_area_priv *priv;
	u32 *wrptr32 = kernel_vaddr;
	const u32 __iomem *rdptr32;
	int n, width;

	priv = nfp_cpp_area_priv(area);
	rdptr64 = priv->iomem + offset;
	rdptr32 = priv->iomem + offset;

	/* Reject reads extending past the end of the area */
	if (offset + length > priv->size)
		return -EFAULT;

	width = priv->width.read;
	if (width <= 0)
		return -EINVAL;

	/* MU reads via a PCIe2CPP BAR support 32bit (and other) lengths */
	if (priv->target == (NFP_CPP_TARGET_MU & NFP_CPP_TARGET_ID_MASK) &&
	    priv->action == NFP_CPP_ACTION_RW &&
	    (offset % sizeof(u64) == 4 || length % sizeof(u64) == 4))
		width = TARGET_WIDTH_32;

	/* Unaligned? Translate to an explicit access */
	if ((priv->offset + offset) & (width - 1))
		return nfp_cpp_explicit_read(nfp_cpp_area_cpp(area),
					     NFP_CPP_ID(priv->target,
							priv->action,
							priv->token),
					     priv->offset + offset,
					     kernel_vaddr, length, width);

	if (WARN_ON(!priv->bar))
		return -EFAULT;

	switch (width) {
	case TARGET_WIDTH_32:
		if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0)
			return -EINVAL;

		for (n = 0; n < length; n += sizeof(u32))
			*wrptr32++ = __raw_readl(rdptr32++);
		return n;
#ifdef __raw_readq
	case TARGET_WIDTH_64:
		if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0)
			return -EINVAL;

		for (n = 0; n < length; n += sizeof(u64))
			*wrptr64++ = __raw_readq(rdptr64++);
		return n;
#endif
	default:
		return -EINVAL;
	}
}
979 
/* Write @length bytes from @kernel_vaddr to @offset within the area.
 * I/O goes through the BAR mapping using the area's pull width (32 or
 * 64 bit); accesses unaligned for that width are bounced through the
 * explicit access interface instead.  Returns the number of bytes
 * written, or a negative errno.
 */
static int
nfp6000_area_write(struct nfp_cpp_area *area,
		   const void *kernel_vaddr,
		   unsigned long offset, unsigned int length)
{
	const u64 __maybe_unused *rdptr64 = kernel_vaddr;
	u64 __iomem __maybe_unused *wrptr64;
	const u32 *rdptr32 = kernel_vaddr;
	struct nfp6000_area_priv *priv;
	u32 __iomem *wrptr32;
	int n, width;

	priv = nfp_cpp_area_priv(area);
	wrptr64 = priv->iomem + offset;
	wrptr32 = priv->iomem + offset;

	/* Reject writes extending past the end of the area */
	if (offset + length > priv->size)
		return -EFAULT;

	width = priv->width.write;
	if (width <= 0)
		return -EINVAL;

	/* MU writes via a PCIe2CPP BAR support 32bit (and other) lengths */
	if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
	    priv->action == NFP_CPP_ACTION_RW &&
	    (offset % sizeof(u64) == 4 || length % sizeof(u64) == 4))
		width = TARGET_WIDTH_32;

	/* Unaligned? Translate to an explicit access */
	if ((priv->offset + offset) & (width - 1))
		return nfp_cpp_explicit_write(nfp_cpp_area_cpp(area),
					      NFP_CPP_ID(priv->target,
							 priv->action,
							 priv->token),
					      priv->offset + offset,
					      kernel_vaddr, length, width);

	if (WARN_ON(!priv->bar))
		return -EFAULT;

	switch (width) {
	case TARGET_WIDTH_32:
		if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0)
			return -EINVAL;

		for (n = 0; n < length; n += sizeof(u32)) {
			__raw_writel(*rdptr32++, wrptr32++);
			/* Keep the words posted in order */
			wmb();
		}
		return n;
#ifdef __raw_writeq
	case TARGET_WIDTH_64:
		if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0)
			return -EINVAL;

		for (n = 0; n < length; n += sizeof(u64)) {
			__raw_writeq(*rdptr64++, wrptr64++);
			/* Keep the words posted in order */
			wmb();
		}
		return n;
#endif
	default:
		return -EINVAL;
	}
}
1046 
/* Per-handle state for one acquired explicit-transaction slot */
struct nfp6000_explicit_priv {
	struct nfp6000_pcie *nfp;	/* owning PCIe device state */
	struct {
		int group;		/* explicit BAR group index */
		int area;		/* slot index within the group */
	} bar;
	int bitsize;			/* slot window size in bits (group bitsize - 2) */
	void __iomem *data;		/* mapping of this slot's data buffer */
	void __iomem *addr;		/* mapping used for the "kickoff" read */
};
1057 
1058 static int nfp6000_explicit_acquire(struct nfp_cpp_explicit *expl)
1059 {
1060 	struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_explicit_cpp(expl));
1061 	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
1062 	int i, j;
1063 
1064 	mutex_lock(&nfp->expl.mutex);
1065 	for (i = 0; i < ARRAY_SIZE(nfp->expl.group); i++) {
1066 		if (!nfp->expl.group[i].bitsize)
1067 			continue;
1068 
1069 		for (j = 0; j < ARRAY_SIZE(nfp->expl.group[i].free); j++) {
1070 			u16 data_offset;
1071 
1072 			if (!nfp->expl.group[i].free[j])
1073 				continue;
1074 
1075 			priv->nfp = nfp;
1076 			priv->bar.group = i;
1077 			priv->bar.area = j;
1078 			priv->bitsize = nfp->expl.group[i].bitsize - 2;
1079 
1080 			data_offset = (priv->bar.group << 9) +
1081 				(priv->bar.area << 7);
1082 			priv->data = nfp->expl.data + data_offset;
1083 			priv->addr = nfp->expl.group[i].addr +
1084 				(priv->bar.area << priv->bitsize);
1085 			nfp->expl.group[i].free[j] = false;
1086 
1087 			mutex_unlock(&nfp->expl.mutex);
1088 			return 0;
1089 		}
1090 	}
1091 	mutex_unlock(&nfp->expl.mutex);
1092 
1093 	return -EAGAIN;
1094 }
1095 
/* Return @expl's explicit slot to the free pool, under the same mutex
 * that nfp6000_explicit_acquire() allocates from.
 */
static void nfp6000_explicit_release(struct nfp_cpp_explicit *expl)
{
	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
	struct nfp6000_pcie *nfp = priv->nfp;

	mutex_lock(&nfp->expl.mutex);
	nfp->expl.group[priv->bar.group].free[priv->bar.area] = true;
	mutex_unlock(&nfp->expl.mutex);
}
1105 
/* Copy @len bytes from @buff into this slot's device-side data buffer,
 * one 32-bit word at a time.
 *
 * NOTE(review): assumes @len is a multiple of sizeof(u32); a trailing
 * partial word would read past the end of @buff -- confirm with callers.
 *
 * Return: number of bytes written (rounded up to a whole word).
 */
static int nfp6000_explicit_put(struct nfp_cpp_explicit *expl,
				const void *buff, size_t len)
{
	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
	const u32 *src = buff;
	size_t i;

	for (i = 0; i < len; i += sizeof(u32))
		writel(*(src++), priv->data + i);

	return i;
}
1118 
/* Program and issue one explicit CPP transaction described by @cmd.
 *
 * Builds the three explicit BAR CSR words for this slot, writes them
 * either through the BAR0 CSR mapping (with readback to flush) or via
 * PCI config space, then triggers the transaction with a single
 * "kickoff" read from the slot's address window.
 *
 * Return: mask of the signals (A/B) the caller should wait for.
 */
static int
nfp6000_explicit_do(struct nfp_cpp_explicit *expl,
		    const struct nfp_cpp_explicit_command *cmd, u64 address)
{
	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
	u8 signal_master, signal_ref, data_master;
	struct nfp6000_pcie *nfp = priv->nfp;
	int sigmask = 0;
	u16 data_ref;
	u32 csr[3];

	/* Collect the signals this command is configured to raise */
	if (cmd->siga_mode)
		sigmask |= 1 << cmd->siga;
	if (cmd->sigb_mode)
		sigmask |= 1 << cmd->sigb;

	/* Default the signal master to this PCIe function's CPP master */
	signal_master = cmd->signal_master;
	if (!signal_master)
		signal_master = nfp->expl.master_id;

	/* When signalling ourselves, use this slot's own signal reference */
	signal_ref = cmd->signal_ref;
	if (signal_master == nfp->expl.master_id)
		signal_ref = nfp->expl.signal_ref +
			((priv->bar.group * 4 + priv->bar.area) << 1);

	/* Default the data master to this PCIe function's CPP master */
	data_master = cmd->data_master;
	if (!data_master)
		data_master = nfp->expl.master_id;

	/* When we are the data master, data_ref addresses this slot's
	 * data buffer (same group/area layout as the acquire path,
	 * offset by the 0x1000 base).
	 */
	data_ref = cmd->data_ref;
	if (data_master == nfp->expl.master_id)
		data_ref = 0x1000 +
			(priv->bar.group << 9) + (priv->bar.area << 7);

	csr[0] = NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType(sigmask) |
		NFP_PCIE_BAR_EXPLICIT_BAR0_Token(
			NFP_CPP_ID_TOKEN_of(cmd->cpp_id)) |
		NFP_PCIE_BAR_EXPLICIT_BAR0_Address(address >> 16);

	csr[1] = NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef(signal_ref) |
		NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster(data_master) |
		NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef(data_ref);

	csr[2] = NFP_PCIE_BAR_EXPLICIT_BAR2_Target(
			NFP_CPP_ID_TARGET_of(cmd->cpp_id)) |
		NFP_PCIE_BAR_EXPLICIT_BAR2_Action(
			NFP_CPP_ID_ACTION_of(cmd->cpp_id)) |
		NFP_PCIE_BAR_EXPLICIT_BAR2_Length(cmd->len) |
		NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask(cmd->byte_mask) |
		NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster(signal_master);

	if (nfp->iomem.csr) {
		writel(csr[0], nfp->iomem.csr +
		       NFP_PCIE_BAR_EXPLICIT_BAR0(priv->bar.group,
						  priv->bar.area));
		writel(csr[1], nfp->iomem.csr +
		       NFP_PCIE_BAR_EXPLICIT_BAR1(priv->bar.group,
						  priv->bar.area));
		writel(csr[2], nfp->iomem.csr +
		       NFP_PCIE_BAR_EXPLICIT_BAR2(priv->bar.group,
						  priv->bar.area));
		/* Readback to ensure BAR is flushed */
		readl(nfp->iomem.csr +
		      NFP_PCIE_BAR_EXPLICIT_BAR0(priv->bar.group,
						 priv->bar.area));
		readl(nfp->iomem.csr +
		      NFP_PCIE_BAR_EXPLICIT_BAR1(priv->bar.group,
						 priv->bar.area));
		readl(nfp->iomem.csr +
		      NFP_PCIE_BAR_EXPLICIT_BAR2(priv->bar.group,
						 priv->bar.area));
	} else {
		/* No CSR BAR mapping - program the explicit BAR CSRs
		 * through PCI config space at offset 0x400 instead.
		 */
		pci_write_config_dword(nfp->pdev, 0x400 +
				       NFP_PCIE_BAR_EXPLICIT_BAR0(
					       priv->bar.group, priv->bar.area),
				       csr[0]);

		pci_write_config_dword(nfp->pdev, 0x400 +
				       NFP_PCIE_BAR_EXPLICIT_BAR1(
					       priv->bar.group, priv->bar.area),
				       csr[1]);

		pci_write_config_dword(nfp->pdev, 0x400 +
				       NFP_PCIE_BAR_EXPLICIT_BAR2(
					       priv->bar.group, priv->bar.area),
				       csr[2]);
	}

	/* Issue the 'kickoff' transaction */
	readb(priv->addr + (address & ((1 << priv->bitsize) - 1)));

	return sigmask;
}
1212 
/* Copy @len bytes of result data from this slot's device-side buffer
 * into @buff, one 32-bit word at a time.
 *
 * NOTE(review): assumes @len is a multiple of sizeof(u32); a trailing
 * partial word would overrun @buff -- confirm with callers.
 *
 * Return: number of bytes read (rounded up to a whole word).
 */
static int nfp6000_explicit_get(struct nfp_cpp_explicit *expl,
				void *buff, size_t len)
{
	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
	u32 *dst = buff;
	size_t i;

	for (i = 0; i < len; i += sizeof(u32))
		*(dst++) = readl(priv->data + i);

	return i;
}
1225 
/* CPP bus post-init hook: seed the area cache with two 64KB entries
 * and one 256KB entry.
 *
 * NOTE(review): nfp_cpp_area_cache_add() return values are ignored --
 * presumably cache population is best-effort; confirm.
 */
static int nfp6000_init(struct nfp_cpp *cpp)
{
	nfp_cpp_area_cache_add(cpp, SZ_64K);
	nfp_cpp_area_cache_add(cpp, SZ_64K);
	nfp_cpp_area_cache_add(cpp, SZ_256K);

	return 0;
}
1234 
/* CPP bus teardown hook: disable all BARs and free the device state
 * allocated in nfp_cpp_from_nfp6000_pcie().
 */
static void nfp6000_free(struct nfp_cpp *cpp)
{
	struct nfp6000_pcie *nfp = nfp_cpp_priv(cpp);

	disable_bars(nfp);
	kfree(nfp);
}
1242 
1243 static int nfp6000_read_serial(struct device *dev, u8 *serial)
1244 {
1245 	struct pci_dev *pdev = to_pci_dev(dev);
1246 	u64 dsn;
1247 
1248 	dsn = pci_get_dsn(pdev);
1249 	if (!dsn) {
1250 		dev_err(dev, "can't find PCIe Serial Number Capability\n");
1251 		return -EINVAL;
1252 	}
1253 
1254 	put_unaligned_be32((u32)(dsn >> 32), serial);
1255 	put_unaligned_be16((u16)(dsn >> 16), serial + 4);
1256 
1257 	return 0;
1258 }
1259 
1260 static int nfp6000_get_interface(struct device *dev)
1261 {
1262 	struct pci_dev *pdev = to_pci_dev(dev);
1263 	u64 dsn;
1264 
1265 	dsn = pci_get_dsn(pdev);
1266 	if (!dsn) {
1267 		dev_err(dev, "can't find PCIe Serial Number Capability\n");
1268 		return -EINVAL;
1269 	}
1270 
1271 	return dsn & 0xffff;
1272 }
1273 
/* PCIe implementation of the generic CPP bus operations */
static const struct nfp_cpp_operations nfp6000_pcie_ops = {
	.owner			= THIS_MODULE,

	.init			= nfp6000_init,
	.free			= nfp6000_free,

	.read_serial		= nfp6000_read_serial,
	.get_interface		= nfp6000_get_interface,

	.area_priv_size		= sizeof(struct nfp6000_area_priv),
	.area_init		= nfp6000_area_init,
	.area_cleanup		= nfp6000_area_cleanup,
	.area_acquire		= nfp6000_area_acquire,
	.area_release		= nfp6000_area_release,
	.area_phys		= nfp6000_area_phys,
	.area_iomem		= nfp6000_area_iomem,
	.area_resource		= nfp6000_area_resource,
	.area_read		= nfp6000_area_read,
	.area_write		= nfp6000_area_write,

	.explicit_priv_size	= sizeof(struct nfp6000_explicit_priv),
	.explicit_acquire	= nfp6000_explicit_acquire,
	.explicit_release	= nfp6000_explicit_release,
	.explicit_put		= nfp6000_explicit_put,
	.explicit_do		= nfp6000_explicit_do,
	.explicit_get		= nfp6000_explicit_get,
};
1301 
1302 /**
1303  * nfp_cpp_from_nfp6000_pcie() - Build a NFP CPP bus from a NFP6000 PCI device
1304  * @pdev:	NFP6000 PCI device
1305  * @dev_info:	NFP ASIC params
1306  *
1307  * Return: NFP CPP handle
1308  */
1309 struct nfp_cpp *
1310 nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info)
1311 {
1312 	struct nfp6000_pcie *nfp;
1313 	u16 interface;
1314 	int err;
1315 
1316 	/*  Finished with card initialization. */
1317 	dev_info(&pdev->dev, "Network Flow Processor %s PCIe Card Probe\n",
1318 		 dev_info->chip_names);
1319 	pcie_print_link_status(pdev);
1320 
1321 	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
1322 	if (!nfp) {
1323 		err = -ENOMEM;
1324 		goto err_ret;
1325 	}
1326 
1327 	nfp->dev = &pdev->dev;
1328 	nfp->pdev = pdev;
1329 	nfp->dev_info = dev_info;
1330 	init_waitqueue_head(&nfp->bar_waiters);
1331 	spin_lock_init(&nfp->bar_lock);
1332 
1333 	interface = nfp6000_get_interface(&pdev->dev);
1334 
1335 	if (NFP_CPP_INTERFACE_TYPE_of(interface) !=
1336 	    NFP_CPP_INTERFACE_TYPE_PCI) {
1337 		dev_err(&pdev->dev,
1338 			"Interface type %d is not the expected %d\n",
1339 			NFP_CPP_INTERFACE_TYPE_of(interface),
1340 			NFP_CPP_INTERFACE_TYPE_PCI);
1341 		err = -ENODEV;
1342 		goto err_free_nfp;
1343 	}
1344 
1345 	if (NFP_CPP_INTERFACE_CHANNEL_of(interface) !=
1346 	    NFP_CPP_INTERFACE_CHANNEL_PEROPENER) {
1347 		dev_err(&pdev->dev, "Interface channel %d is not the expected %d\n",
1348 			NFP_CPP_INTERFACE_CHANNEL_of(interface),
1349 			NFP_CPP_INTERFACE_CHANNEL_PEROPENER);
1350 		err = -ENODEV;
1351 		goto err_free_nfp;
1352 	}
1353 
1354 	err = enable_bars(nfp, interface);
1355 	if (err)
1356 		goto err_free_nfp;
1357 
1358 	/* Probe for all the common NFP devices */
1359 	return nfp_cpp_from_operations(&nfp6000_pcie_ops, &pdev->dev, nfp);
1360 
1361 err_free_nfp:
1362 	kfree(nfp);
1363 err_ret:
1364 	dev_err(&pdev->dev, "NFP6000 PCI setup failed\n");
1365 	return ERR_PTR(err);
1366 }
1367