/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "bman_priv.h"

#define IRQNAME		"BMan portal %d"
#define MAX_IRQNAME	16	/* big enough for "BMan portal %d" */

/* Portal register assists */

#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
/* Cache-inhibited register offsets */
#define BM_REG_RCR_PI_CINH	0x3000
#define BM_REG_RCR_CI_CINH	0x3100
#define BM_REG_RCR_ITR		0x3200
#define BM_REG_CFG		0x3300
#define BM_REG_SCN(n)		(0x3400 + ((n) << 6))
#define BM_REG_ISR		0x3e00
#define BM_REG_IER		0x3e40
#define BM_REG_ISDR		0x3e80
#define BM_REG_IIR		0x3ec0

/* Cache-enabled register offsets */
#define BM_CL_CR		0x0000
#define BM_CL_RR0		0x0100
#define BM_CL_RR1		0x0140
#define BM_CL_RCR		0x1000
#define BM_CL_RCR_PI_CENA	0x3000
#define BM_CL_RCR_CI_CENA	0x3100

#else
/* Cache-inhibited register offsets */
#define BM_REG_RCR_PI_CINH	0x0000
#define BM_REG_RCR_CI_CINH	0x0004
#define BM_REG_RCR_ITR		0x0008
#define BM_REG_CFG		0x0100
#define BM_REG_SCN(n)		(0x0200 + ((n) << 2))
#define BM_REG_ISR		0x0e00
#define BM_REG_IER		0x0e04
#define BM_REG_ISDR		0x0e08
#define BM_REG_IIR		0x0e0c

/* Cache-enabled register offsets */
#define BM_CL_CR		0x0000
#define BM_CL_RR0		0x0100
#define BM_CL_RR1		0x0140
#define BM_CL_RCR		0x1000
#define BM_CL_RCR_PI_CENA	0x3000
#define BM_CL_RCR_CI_CENA	0x3100
#endif

/*
 * Portal modes.
 *   Enum types:
 *     pmode == production mode
 *     cmode == consumption mode
 *   Enum values use 3-letter codes. The first letter matches the portal mode,
 *   the remaining two letters indicate:
 *     ci == cache-inhibited portal register
 *     ce == cache-enabled portal register
 *     vb == in-band valid-bit (cache-enabled)
 */
enum bm_rcr_pmode {		/* matches BCSP_CFG::RPM */
	bm_rcr_pci = 0,		/* PI index, cache-inhibited */
	bm_rcr_pce = 1,		/* PI index, cache-enabled */
	bm_rcr_pvb = 2		/* valid-bit */
};
enum bm_rcr_cmode {		/* s/w-only */
	bm_rcr_cci,		/* CI index, cache-inhibited */
	bm_rcr_cce		/* CI index, cache-enabled */
};
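
/*
 * This driver only uses bm_rcr_pvb with bm_rcr_cce (see bm_rcr_init() in
 * bman_create_portal()): entries are handed to hardware via the in-band
 * valid-bit, and the consumer index is tracked through the cache-enabled
 * register to keep cache-inhibited accesses to a minimum.
 */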


/* --- Portal structures --- */

#define BM_RCR_SIZE		8

/* Release Command */
struct bm_rcr_entry {
	union {
		struct {
			u8 _ncw_verb; /* writes to this are non-coherent */
			u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
			u8 __reserved1[62];
		};
		struct bm_buffer bufs[8];
	};
};
#define BM_RCR_VERB_VBIT		0x80
#define BM_RCR_VERB_CMD_MASK		0x70	/* one of two values: */
#define BM_RCR_VERB_CMD_BPID_SINGLE	0x20
#define BM_RCR_VERB_CMD_BPID_MULTI	0x30
#define BM_RCR_VERB_BUFCOUNT_MASK	0x0f	/* values 1..8 */
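
/*
 * Example verb composition, as done in bman_release(): the command bits and
 * the buffer count are OR'd together, and bm_rcr_pvb_commit() then adds the
 * ring's current valid-bit:
 *	BM_RCR_VERB_CMD_BPID_SINGLE | (num & BM_RCR_VERB_BUFCOUNT_MASK)
 */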

struct bm_rcr {
	struct bm_rcr_entry *ring, *cursor;
	u8 ci, available, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	u32 busy;
	enum bm_rcr_pmode pmode;
	enum bm_rcr_cmode cmode;
#endif
};

/* MC (Management Command) command */
struct bm_mc_command {
	u8 _ncw_verb; /* writes to this are non-coherent */
	u8 bpid; /* used by acquire command */
	u8 __reserved[62];
};
#define BM_MCC_VERB_VBIT		0x80
#define BM_MCC_VERB_CMD_MASK		0x70	/* one of the following: */
#define BM_MCC_VERB_CMD_ACQUIRE		0x10
#define BM_MCC_VERB_CMD_QUERY		0x40
#define BM_MCC_VERB_ACQUIRE_BUFCOUNT	0x0f	/* values 1..8 go here */

/* MC result, Acquire and Query Response */
union bm_mc_result {
	struct {
		u8 verb;
		u8 bpid;
		u8 __reserved[62];
	};
	struct bm_buffer bufs[8];
};
#define BM_MCR_VERB_VBIT		0x80
#define BM_MCR_VERB_CMD_MASK		BM_MCC_VERB_CMD_MASK
#define BM_MCR_VERB_CMD_ACQUIRE		BM_MCC_VERB_CMD_ACQUIRE
#define BM_MCR_VERB_CMD_QUERY		BM_MCC_VERB_CMD_QUERY
#define BM_MCR_VERB_CMD_ERR_INVALID	0x60
#define BM_MCR_VERB_CMD_ERR_ECC		0x70
#define BM_MCR_VERB_ACQUIRE_BUFCOUNT	BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
#define BM_MCR_TIMEOUT			10000 /* us */
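
/*
 * bm_mc_result_timeout() polls for a response in 1 us steps, so this bounds
 * a management command to roughly 10 ms before the caller sees a timeout.
 */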

struct bm_mc {
	struct bm_mc_command *cr;
	union bm_mc_result *rr;
	u8 rridx, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum {
		/* Can only be _mc_start()ed */
		mc_idle,
		/* Can only be _mc_commit()ed or _mc_abort()ed */
		mc_user,
		/* Can only be _mc_retry()ed */
		mc_hw
	} state;
#endif
};

struct bm_addr {
	void *ce;		/* cache-enabled */
	__be32 *ce_be;		/* Same as above but for direct access */
	void __iomem *ci;	/* cache-inhibited */
};

struct bm_portal {
	struct bm_addr addr;
	struct bm_rcr rcr;
	struct bm_mc mc;
} ____cacheline_aligned;

/* Cache-inhibited register access. */
static inline u32 bm_in(struct bm_portal *p, u32 offset)
{
	return ioread32be(p->addr.ci + offset);
}

static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
{
	iowrite32be(val, p->addr.ci + offset);
}

/* Cache Enabled Portal Access */
static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset)
{
	dpaa_invalidate(p->addr.ce + offset);
}

static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
{
	dpaa_touch_ro(p->addr.ce + offset);
}

static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
{
	return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
}

struct bman_portal {
	struct bm_portal p;
	/* interrupt sources processed by portal_isr(), configurable */
	unsigned long irq_sources;
	/* probing time config params for cpu-affine portals */
	const struct bm_portal_config *config;
	char irqname[MAX_IRQNAME];
};

static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);

static inline struct bman_portal *get_affine_portal(void)
{
	return &get_cpu_var(bman_affine_portal);
}

static inline void put_affine_portal(void)
{
	put_cpu_var(bman_affine_portal);
}
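
/*
 * get_cpu_var()/put_cpu_var() disable and re-enable preemption, so code
 * running between get_affine_portal() and put_affine_portal() cannot
 * migrate off the portal's CPU.
 */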

/*
 * This object type refers to a pool; it isn't *the* pool. There may be
 * more than one such object per BMan buffer pool, e.g. if different users
 * of the pool are operating via different portals.
 */
struct bman_pool {
	/* index of the buffer pool to encapsulate (0-63) */
	u32 bpid;
	/* Used for hash-table admin when using depletion notifications. */
	struct bman_portal *portal;
	struct bman_pool *next;
};

static u32 poll_portal_slow(struct bman_portal *p, u32 is);

static irqreturn_t portal_isr(int irq, void *ptr)
{
	struct bman_portal *p = ptr;
	struct bm_portal *portal = &p->p;
	u32 clear = p->irq_sources;
	u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources;

	if (unlikely(!is))
		return IRQ_NONE;

	clear |= poll_portal_slow(p, is);
	bm_out(portal, BM_REG_ISR, clear);
	return IRQ_HANDLED;
}

/* --- RCR API --- */

#define RCR_SHIFT	ilog2(sizeof(struct bm_rcr_entry))
#define RCR_CARRY	(uintptr_t)(BM_RCR_SIZE << RCR_SHIFT)
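
/*
 * RCR_CARRY is the ring's total byte size (BM_RCR_SIZE entries of
 * sizeof(struct bm_rcr_entry) bytes each). Because the ring is size-aligned,
 * advancing the cursor one entry past the end sets exactly this bit in the
 * pointer; clearing it wraps the pointer back to the first entry.
 */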

/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~RCR_CARRY;

	return (struct bm_rcr_entry *)addr;
}

#ifdef CONFIG_FSL_DPAA_CHECKING
/* Bit-wise logic to convert a ring pointer to a ring index */
static int rcr_ptr2idx(struct bm_rcr_entry *e)
{
	return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1);
}
#endif

/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
static inline void rcr_inc(struct bm_rcr *rcr)
{
	/* increment to the next RCR pointer and handle overflow and 'vbit' */
	struct bm_rcr_entry *partial = rcr->cursor + 1;

	rcr->cursor = rcr_carryclear(partial);
	if (partial != rcr->cursor)
		rcr->vbit ^= BM_RCR_VERB_VBIT;
}

static int bm_rcr_get_avail(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;

	return rcr->available;
}

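/*
 * One entry is always held back (note the BM_RCR_SIZE - 1 arithmetic here
 * and in bm_rcr_init()), so a full ring never has producer index ==
 * consumer index, which would be indistinguishable from an empty ring.
 */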
static int bm_rcr_get_fill(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;

	return BM_RCR_SIZE - 1 - rcr->available;
}

static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
{
	struct bm_rcr *rcr = &portal->rcr;

	rcr->ithresh = ithresh;
	bm_out(portal, BM_REG_RCR_ITR, ithresh);
}

static void bm_rcr_cce_prefetch(struct bm_portal *portal)
{
	__maybe_unused struct bm_rcr *rcr = &portal->rcr;

	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
	bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA);
}

static u8 bm_rcr_cce_update(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;
	u8 diff, old_ci = rcr->ci;

	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
	rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1);
	bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);
	diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
	rcr->available += diff;
	return diff;
}

static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;

	DPAA_ASSERT(!rcr->busy);
	if (!rcr->available)
		return NULL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	rcr->busy = 1;
#endif
	dpaa_zero(rcr->cursor);
	return rcr->cursor;
}

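/*
 * Publish the entry under construction: dma_wmb() orders the body writes
 * ahead of the verb write, and the verb (tagged with the ring's current
 * valid-bit) is what hands the entry over to hardware.
 */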
static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
{
	struct bm_rcr *rcr = &portal->rcr;
	struct bm_rcr_entry *rcursor;

	DPAA_ASSERT(rcr->busy);
	DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
	DPAA_ASSERT(rcr->available >= 1);
	dma_wmb();
	rcursor = rcr->cursor;
	rcursor->_ncw_verb = myverb | rcr->vbit;
	dpaa_flush(rcursor);
	rcr_inc(rcr);
	rcr->available--;
#ifdef CONFIG_FSL_DPAA_CHECKING
	rcr->busy = 0;
#endif
}

static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
		       enum bm_rcr_cmode cmode)
{
	struct bm_rcr *rcr = &portal->rcr;
	u32 cfg;
	u8 pi;

	rcr->ring = portal->addr.ce + BM_CL_RCR;
	rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
	pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
	rcr->cursor = rcr->ring + pi;
	rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ?
		BM_RCR_VERB_VBIT : 0;
	rcr->available = BM_RCR_SIZE - 1
		- dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
	rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	rcr->busy = 0;
	rcr->pmode = pmode;
	rcr->cmode = cmode;
#endif
	cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0)
		| (pmode & 0x3); /* BCSP_CFG::RPM */
	bm_out(portal, BM_REG_CFG, cfg);
	return 0;
}

static void bm_rcr_finish(struct bm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct bm_rcr *rcr = &portal->rcr;
	int i;

	DPAA_ASSERT(!rcr->busy);

	i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
	if (i != rcr_ptr2idx(rcr->cursor))
		pr_crit("losing uncommitted RCR entries\n");

	i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
	if (i != rcr->ci)
		pr_crit("missing existing RCR completions\n");
	if (rcr->ci != rcr_ptr2idx(rcr->cursor))
		pr_crit("RCR destroyed unquiesced\n");
#endif
}

/* --- Management command API --- */
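/*
 * Commands are written to a single cache-enabled command register; the
 * hardware replies in one of two response registers (RR0/RR1), ping-ponging
 * between them with the command valid-bit. bm_mc_init() recovers the current
 * phase from whatever verb was last left in the command register.
 */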
static int bm_mc_init(struct bm_portal *portal)
{
	struct bm_mc *mc = &portal->mc;

	mc->cr = portal->addr.ce + BM_CL_CR;
	mc->rr = portal->addr.ce + BM_CL_RR0;
	mc->rridx = (mc->cr->_ncw_verb & BM_MCC_VERB_VBIT) ?
		    0 : 1;
	mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_idle;
#endif
	return 0;
}

static void bm_mc_finish(struct bm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct bm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == mc_idle);
	if (mc->state != mc_idle)
		pr_crit("Losing incomplete MC command\n");
#endif
}

static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
{
	struct bm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == mc_idle);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_user;
#endif
	dpaa_zero(mc->cr);
	return mc->cr;
}

static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
{
	struct bm_mc *mc = &portal->mc;
	union bm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == mc_user);
	dma_wmb();
	mc->cr->_ncw_verb = myverb | mc->vbit;
	dpaa_flush(mc->cr);
	dpaa_invalidate_touch_ro(rr);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_hw;
#endif
}

static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
{
	struct bm_mc *mc = &portal->mc;
	union bm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == mc_hw);
	/*
	 * The inactive response register's verb byte always returns zero until
	 * its command is submitted and completed. This includes the valid-bit,
	 * in case you were wondering...
	 */
	if (!rr->verb) {
		dpaa_invalidate_touch_ro(rr);
		return NULL;
	}
	mc->rridx ^= 1;
	mc->vbit ^= BM_MCC_VERB_VBIT;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_idle;
#endif
	return rr;
}

static inline int bm_mc_result_timeout(struct bm_portal *portal,
				       union bm_mc_result **mcr)
{
	int timeout = BM_MCR_TIMEOUT;

	do {
		*mcr = bm_mc_result(portal);
		if (*mcr)
			break;
		udelay(1);
	} while (--timeout);

	return timeout;
}

/*
 * Disable all BSCN (buffer pool state change notification) interrupts for
 * the portal; the two 32-bit SCN enable registers together cover the 64
 * buffer pools.
 */
static void bm_isr_bscn_disable(struct bm_portal *portal)
{
	bm_out(portal, BM_REG_SCN(0), 0);
	bm_out(portal, BM_REG_SCN(1), 0);
}

static int bman_create_portal(struct bman_portal *portal,
			      const struct bm_portal_config *c)
{
	struct bm_portal *p;
	int ret;

	p = &portal->p;
	/*
	 * Prep the low-level portal struct with the mapped addresses from the
	 * config; everything that follows depends on it, and "config" is then
	 * kept mostly for (de)reference.
	 */
	p->addr.ce = c->addr_virt_ce;
	p->addr.ce_be = c->addr_virt_ce;
	p->addr.ci = c->addr_virt_ci;
	if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
		dev_err(c->dev, "RCR initialisation failed\n");
		goto fail_rcr;
	}
	if (bm_mc_init(p)) {
		dev_err(c->dev, "MC initialisation failed\n");
		goto fail_mc;
	}
	/*
	 * Default to all BPIDs disabled, we enable as required at
	 * run-time.
	 */
	bm_isr_bscn_disable(p);

	/* Write-to-clear any stale interrupt status bits */
	bm_out(p, BM_REG_ISDR, 0xffffffff);
	portal->irq_sources = 0;
	bm_out(p, BM_REG_IER, 0);
	bm_out(p, BM_REG_ISR, 0xffffffff);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
		dev_err(c->dev, "request_irq() failed\n");
		goto fail_irq;
	}

	if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
		goto fail_affinity;

	/* Need RCR to be empty before continuing */
	ret = bm_rcr_get_fill(p);
	if (ret) {
		dev_err(c->dev, "RCR unclean\n");
		goto fail_rcr_empty;
	}
	/* Success */
	portal->config = c;

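	/* Stop disabling interrupt status capture (ISDR) and un-inhibit the
	 * portal interrupt (IIR) now that setup is complete. */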
	bm_out(p, BM_REG_ISDR, 0);
	bm_out(p, BM_REG_IIR, 0);

	return 0;

fail_rcr_empty:
fail_affinity:
	free_irq(c->irq, portal);
fail_irq:
	bm_mc_finish(p);
fail_mc:
	bm_rcr_finish(p);
fail_rcr:
	return -EIO;
}

struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c)
{
	struct bman_portal *portal;
	int err;

	portal = &per_cpu(bman_affine_portal, c->cpu);
	err = bman_create_portal(portal, c);
	if (err)
		return NULL;

	spin_lock(&affine_mask_lock);
	cpumask_set_cpu(c->cpu, &affine_mask);
	spin_unlock(&affine_mask_lock);

	return portal;
}

static u32 poll_portal_slow(struct bman_portal *p, u32 is)
{
	u32 ret = is;

	if (is & BM_PIRQ_RCRI) {
		bm_rcr_cce_update(&p->p);
		bm_rcr_set_ithresh(&p->p, 0);
		bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI);
		is &= ~BM_PIRQ_RCRI;
	}

	/* There should be no status register bits left undefined */
	DPAA_ASSERT(!is);
	return ret;
}

int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	p->irq_sources |= bits & BM_PIRQ_VISIBLE;
	bm_out(&p->p, BM_REG_IER, p->irq_sources);
	local_irq_restore(irqflags);
	return 0;
}

int bm_shutdown_pool(u32 bpid)
{
	int err = 0;
	struct bm_mc_command *bm_cmd;
	union bm_mc_result *bm_res;
	struct bman_portal *p = get_affine_portal();

	while (1) {
		/* Acquire buffers until empty */
		bm_cmd = bm_mc_start(&p->p);
		bm_cmd->bpid = bpid;
		bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
		if (!bm_mc_result_timeout(&p->p, &bm_res)) {
			pr_crit("BMan Acquire Command timed out\n");
			err = -ETIMEDOUT;
			goto done;
		}
		if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
			/* Pool is empty */
			goto done;
		}
	}
done:
	put_affine_portal();
	return err;
}

struct gen_pool *bm_bpalloc;

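/*
 * BPIDs are stored in the genalloc pool offset by DPAA_GENALLOC_OFF so that
 * a valid BPID of 0 is distinguishable from gen_pool_alloc()'s "allocation
 * failed" return value of 0.
 */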
static int bm_alloc_bpid_range(u32 *result, u32 count)
{
	unsigned long addr;

	addr = gen_pool_alloc(bm_bpalloc, count);
	if (!addr)
		return -ENOMEM;

	*result = addr & ~DPAA_GENALLOC_OFF;

	return 0;
}

static int bm_release_bpid(u32 bpid)
{
	int ret;

	ret = bm_shutdown_pool(bpid);
	if (ret) {
		pr_debug("BPID %d leaked\n", bpid);
		return ret;
	}

	gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1);
	return 0;
}

struct bman_pool *bman_new_pool(void)
{
	struct bman_pool *pool = NULL;
	u32 bpid;

	if (bm_alloc_bpid_range(&bpid, 1))
		return NULL;

	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		goto err;

	pool->bpid = bpid;

	return pool;
err:
	bm_release_bpid(bpid);
	return NULL;
}
EXPORT_SYMBOL(bman_new_pool);

void bman_free_pool(struct bman_pool *pool)
{
	bm_release_bpid(pool->bpid);

	kfree(pool);
}
EXPORT_SYMBOL(bman_free_pool);

int bman_get_bpid(const struct bman_pool *pool)
{
	return pool->bpid;
}
EXPORT_SYMBOL(bman_get_bpid);

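/*
 * Refresh our view of the RCR consumer index when the ring looks tight: if
 * at least one slot is still free, a read-only prefetch of the cache-enabled
 * CI register warms it up for the next update; only when the ring appears
 * full do we pay for a synchronous bm_rcr_cce_update().
 */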
static void update_rcr_ci(struct bman_portal *p, int avail)
{
	if (avail)
		bm_rcr_cce_prefetch(&p->p);
	else
		bm_rcr_cce_update(&p->p);
}

int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
{
	struct bman_portal *p;
	struct bm_rcr_entry *r;
	unsigned long irqflags;
	int avail, timeout = 1000; /* 1ms */
	int i = num - 1;

	DPAA_ASSERT(num > 0 && num <= 8);

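	/*
	 * Try for up to 1 ms (1000 iterations, 1 us apart) to claim a free
	 * RCR slot, refreshing the consumer index whenever the ring looks
	 * nearly full.
	 */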
	do {
		p = get_affine_portal();
		local_irq_save(irqflags);
		avail = bm_rcr_get_avail(&p->p);
		if (avail < 2)
			update_rcr_ci(p, avail);
		r = bm_rcr_start(&p->p);
		local_irq_restore(irqflags);
		put_affine_portal();
		if (likely(r))
			break;

		udelay(1);
	} while (--timeout);

	if (unlikely(!timeout))
		return -ETIMEDOUT;

	p = get_affine_portal();
	local_irq_save(irqflags);
	/*
	 * Copy all but the first entry wholesale. The first entry overlaps
	 * the verb byte, so writing it in one go could expose a stale
	 * valid-bit to the hardware; set its fields individually instead.
	 */
	bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
	bm_buffer_set_bpid(r->bufs, pool->bpid);
	if (i)
		memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));

	bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
			  (num & BM_RCR_VERB_BUFCOUNT_MASK));

	local_irq_restore(irqflags);
	put_affine_portal();
	return 0;
}
EXPORT_SYMBOL(bman_release);

int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num)
{
	struct bman_portal *p = get_affine_portal();
	struct bm_mc_command *mcc;
	union bm_mc_result *mcr;
	int ret;

	DPAA_ASSERT(num > 0 && num <= 8);

	mcc = bm_mc_start(&p->p);
	mcc->bpid = pool->bpid;
	bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
		     (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
	if (!bm_mc_result_timeout(&p->p, &mcr)) {
		put_affine_portal();
		pr_crit("BMan Acquire Timeout\n");
		return -ETIMEDOUT;
	}
	ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
	if (bufs)
		memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0]));

	put_affine_portal();
	if (ret != num)
		ret = -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(bman_acquire);
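
/*
 * Illustrative sketch (not part of the driver) of how a client typically
 * cycles a buffer through a pool with the API above. 'dma' stands in for a
 * bus address the caller obtained elsewhere (e.g. from dma_map_single()):
 *
 *	struct bman_pool *pool = bman_new_pool();
 *	struct bm_buffer buf;
 *
 *	if (!pool)
 *		return -ENOMEM;
 *	bm_buffer_set64(&buf, dma);
 *	ret = bman_release(pool, &buf, 1);	 seeds the pool
 *	...
 *	ret = bman_acquire(pool, &buf, 1);	 returns 1 on success
 *	bman_free_pool(pool);
 */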

const struct bm_portal_config *
bman_get_bm_portal_config(const struct bman_portal *portal)
{
	return portal->config;
}