1 /*
2  * Copyright 2008 - 2015 Freescale Semiconductor Inc.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *     * Redistributions of source code must retain the above copyright
7  *       notice, this list of conditions and the following disclaimer.
8  *     * Redistributions in binary form must reproduce the above copyright
9  *       notice, this list of conditions and the following disclaimer in the
10  *       documentation and/or other materials provided with the distribution.
11  *     * Neither the name of Freescale Semiconductor nor the
12  *       names of its contributors may be used to endorse or promote products
13  *       derived from this software without specific prior written permission.
14  *
15  *
16  * ALTERNATIVELY, this software may be distributed under the terms of the
17  * GNU General Public License ("GPL") as published by the Free Software
18  * Foundation, either version 2 of that License or (at your option) any
19  * later version.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
22  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
23  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24  * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
25  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
28  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
30  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 
35 #include "fman_port.h"
36 #include "fman.h"
37 #include "fman_sp.h"
38 
39 #include <linux/io.h>
40 #include <linux/slab.h>
41 #include <linux/module.h>
42 #include <linux/interrupt.h>
43 #include <linux/of_platform.h>
44 #include <linux/of_address.h>
45 #include <linux/delay.h>
46 #include <linux/libfdt_env.h>
47 
/* Queue ID */
#define DFLT_FQ_ID		0x00FFFFFF

/* General defines */
#define PORT_BMI_FIFO_UNITS		0x100

/* Port FIFO size is capped at 1024 FIFO units, or the BMI max if smaller */
#define MAX_PORT_FIFO_SIZE(bmi_max_fifo_size)	\
	min((u32)bmi_max_fifo_size, (u32)1024 * FMAN_BMI_FIFO_UNITS)

#define PORT_CG_MAP_NUM			8
#define PORT_PRS_RESULT_WORDS_NUM	8
#define PORT_IC_OFFSET_UNITS		0x10

#define MIN_EXT_BUF_SIZE		64

/* Offsets of the register sub-blocks inside a port's memory window */
#define BMI_PORT_REGS_OFFSET				0
#define QMI_PORT_REGS_OFFSET				0x400
#define HWP_PORT_REGS_OFFSET				0x800

/* Default values */
#define DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN		\
	DFLT_FM_SP_BUFFER_PREFIX_CONTEXT_DATA_ALIGN

#define DFLT_PORT_CUT_BYTES_FROM_END		4

#define DFLT_PORT_ERRORS_TO_DISCARD		FM_PORT_FRM_ERR_CLS_DISCARD
#define DFLT_PORT_MAX_FRAME_LENGTH		9600

#define DFLT_PORT_RX_FIFO_PRI_ELEVATION_LEV(bmi_max_fifo_size)	\
	MAX_PORT_FIFO_SIZE(bmi_max_fifo_size)

/* FMan v6 uses the whole FIFO as Rx threshold; older revisions use 3/4 */
#define DFLT_PORT_RX_FIFO_THRESHOLD(major, bmi_max_fifo_size)	\
	(major == 6 ?						\
	MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) :		\
	(MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) * 3 / 4))	\

#define DFLT_PORT_EXTRA_NUM_OF_FIFO_BUFS		0

/* QMI defines */
#define QMI_DEQ_CFG_SUBPORTAL_MASK		0x1f

#define QMI_PORT_CFG_EN				0x80000000
#define QMI_PORT_STATUS_DEQ_FD_BSY		0x20000000

#define QMI_DEQ_CFG_PRI				0x80000000
#define QMI_DEQ_CFG_TYPE1			0x10000000
#define QMI_DEQ_CFG_TYPE2			0x20000000
#define QMI_DEQ_CFG_TYPE3			0x30000000
#define QMI_DEQ_CFG_PREFETCH_PARTIAL		0x01000000
#define QMI_DEQ_CFG_PREFETCH_FULL		0x03000000
#define QMI_DEQ_CFG_SP_MASK			0xf
#define QMI_DEQ_CFG_SP_SHIFT			20

/* Tx ports get a deeper dequeue byte-count level than other port types */
#define QMI_BYTE_COUNT_LEVEL_CONTROL(_type)	\
	(_type == FMAN_PORT_TYPE_TX ? 0x1400 : 0x400)
103 
/* BMI defines */
#define BMI_EBD_EN				0x80000000

#define BMI_PORT_CFG_EN				0x80000000

#define BMI_PORT_STATUS_BSY			0x80000000

#define BMI_DMA_ATTR_SWP_SHIFT			FMAN_SP_DMA_ATTR_SWP_SHIFT
#define BMI_DMA_ATTR_WRITE_OPTIMIZE		FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE

#define BMI_RX_FIFO_PRI_ELEVATION_SHIFT	16
#define BMI_RX_FIFO_THRESHOLD_ETHE		0x80000000

#define BMI_FRAME_END_CS_IGNORE_SHIFT		24
#define BMI_FRAME_END_CS_IGNORE_MASK		0x0000001f

#define BMI_RX_FRAME_END_CUT_SHIFT		16
#define BMI_RX_FRAME_END_CUT_MASK		0x0000001f

#define BMI_IC_TO_EXT_SHIFT			FMAN_SP_IC_TO_EXT_SHIFT
#define BMI_IC_TO_EXT_MASK			0x0000001f
#define BMI_IC_FROM_INT_SHIFT			FMAN_SP_IC_FROM_INT_SHIFT
#define BMI_IC_FROM_INT_MASK			0x0000000f
#define BMI_IC_SIZE_MASK			0x0000001f

#define BMI_INT_BUF_MARG_SHIFT			28
#define BMI_INT_BUF_MARG_MASK			0x0000000f
#define BMI_EXT_BUF_MARG_START_SHIFT		FMAN_SP_EXT_BUF_MARG_START_SHIFT
#define BMI_EXT_BUF_MARG_START_MASK		0x000001ff
#define BMI_EXT_BUF_MARG_END_MASK		0x000001ff

/* Frame command/attribute register: Rx error-discard bits */
#define BMI_CMD_MR_LEAC				0x00200000
#define BMI_CMD_MR_SLEAC			0x00100000
#define BMI_CMD_MR_MA				0x00080000
#define BMI_CMD_MR_DEAS				0x00040000
#define BMI_CMD_RX_MR_DEF			(BMI_CMD_MR_LEAC | \
						BMI_CMD_MR_SLEAC | \
						BMI_CMD_MR_MA | \
						BMI_CMD_MR_DEAS)
#define BMI_CMD_TX_MR_DEF			0

#define BMI_CMD_ATTR_ORDER			0x80000000
#define BMI_CMD_ATTR_SYNC			0x02000000
#define BMI_CMD_ATTR_COLOR_SHIFT		26

#define BMI_FIFO_PIPELINE_DEPTH_SHIFT		12
#define BMI_FIFO_PIPELINE_DEPTH_MASK		0x0000000f
#define BMI_NEXT_ENG_FD_BITS_SHIFT		24

#define BMI_EXT_BUF_POOL_VALID			FMAN_SP_EXT_BUF_POOL_VALID
#define BMI_EXT_BUF_POOL_EN_COUNTER		FMAN_SP_EXT_BUF_POOL_EN_COUNTER
#define BMI_EXT_BUF_POOL_BACKUP		FMAN_SP_EXT_BUF_POOL_BACKUP
#define BMI_EXT_BUF_POOL_ID_SHIFT		16
#define BMI_EXT_BUF_POOL_ID_MASK		0x003F0000
#define BMI_POOL_DEP_NUM_OF_POOLS_SHIFT	16

#define BMI_TX_FIFO_MIN_FILL_SHIFT		16

#define BMI_PRIORITY_ELEVATION_LEVEL ((0x3FF + 1) * PORT_BMI_FIFO_UNITS)
#define BMI_FIFO_THRESHOLD	      ((0x3FF + 1) * PORT_BMI_FIFO_UNITS)

/* 10G Tx ports need a deeper dequeue pipeline to sustain line rate */
#define BMI_DEQUEUE_PIPELINE_DEPTH(_type, _speed)		\
	((_type == FMAN_PORT_TYPE_TX && _speed == 10000) ? 4 : 1)

/* Rx frame errors that are enqueued (to the error queue) rather than
 * silently discarded
 */
#define RX_ERRS_TO_ENQ				  \
	(FM_PORT_FRM_ERR_DMA			| \
	FM_PORT_FRM_ERR_PHYSICAL		| \
	FM_PORT_FRM_ERR_SIZE			| \
	FM_PORT_FRM_ERR_EXTRACTION		| \
	FM_PORT_FRM_ERR_NO_SCHEME		| \
	FM_PORT_FRM_ERR_PRS_TIMEOUT		| \
	FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT	| \
	FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED	| \
	FM_PORT_FRM_ERR_PRS_HDR_ERR		| \
	FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW	| \
	FM_PORT_FRM_ERR_IPRE)

/* NIA (Next Invoked Action) defines: engine selector + action code */
#define NIA_ORDER_RESTOR				0x00800000
#define NIA_ENG_BMI					0x00500000
#define NIA_ENG_QMI_ENQ					0x00540000
#define NIA_ENG_QMI_DEQ					0x00580000
#define NIA_ENG_HWP					0x00440000
#define NIA_BMI_AC_ENQ_FRAME				0x00000002
#define NIA_BMI_AC_TX_RELEASE				0x000002C0
#define NIA_BMI_AC_RELEASE				0x000000C0
#define NIA_BMI_AC_TX					0x00000274
#define NIA_BMI_AC_FETCH_ALL_FRAME			0x0000020c

/* Port IDs */
#define TX_10G_PORT_BASE		0x30
#define RX_10G_PORT_BASE		0x10
197 /* BMI Rx port register map */
/* BMI Rx port register map.
 * Memory-mapped hardware layout: field order, sizes and reserved gaps are
 * fixed by the FMan BMI block and must not be changed.
 */
struct fman_port_rx_bmi_regs {
	u32 fmbm_rcfg;		/* Rx Configuration */
	u32 fmbm_rst;		/* Rx Status */
	u32 fmbm_rda;		/* Rx DMA attributes */
	u32 fmbm_rfp;		/* Rx FIFO Parameters */
	u32 fmbm_rfed;		/* Rx Frame End Data */
	u32 fmbm_ricp;		/* Rx Internal Context Parameters */
	u32 fmbm_rim;		/* Rx Internal Buffer Margins */
	u32 fmbm_rebm;		/* Rx External Buffer Margins */
	u32 fmbm_rfne;		/* Rx Frame Next Engine */
	u32 fmbm_rfca;		/* Rx Frame Command Attributes. */
	u32 fmbm_rfpne;		/* Rx Frame Parser Next Engine */
	u32 fmbm_rpso;		/* Rx Parse Start Offset */
	u32 fmbm_rpp;		/* Rx Policer Profile  */
	u32 fmbm_rccb;		/* Rx Coarse Classification Base */
	u32 fmbm_reth;		/* Rx Excessive Threshold */
	u32 reserved003c[1];	/* (0x03C 0x03F) */
	u32 fmbm_rprai[PORT_PRS_RESULT_WORDS_NUM];
	/* Rx Parse Results Array Init */
	u32 fmbm_rfqid;		/* Rx Frame Queue ID */
	u32 fmbm_refqid;	/* Rx Error Frame Queue ID */
	u32 fmbm_rfsdm;		/* Rx Frame Status Discard Mask */
	u32 fmbm_rfsem;		/* Rx Frame Status Error Mask */
	u32 fmbm_rfene;		/* Rx Frame Enqueue Next Engine */
	u32 reserved0074[0x2];	/* (0x074-0x07C)  */
	u32 fmbm_rcmne;		/* Rx Frame Continuous Mode Next Engine */
	u32 reserved0080[0x20];	/* (0x080 0x0FF)  */
	u32 fmbm_ebmpi[FMAN_PORT_MAX_EXT_POOLS_NUM];
	/* Buffer Manager pool Information- */
	u32 fmbm_acnt[FMAN_PORT_MAX_EXT_POOLS_NUM];	/* Allocate Counter- */
	u32 reserved0130[8];	/* 0x130/0x140 - 0x15F reserved - */
	u32 fmbm_rcgm[PORT_CG_MAP_NUM];	/* Congestion Group Map */
	u32 fmbm_mpd;		/* BM Pool Depletion  */
	u32 reserved0184[0x1F];	/* (0x184 0x1FF) */
	u32 fmbm_rstc;		/* Rx Statistics Counters */
	u32 fmbm_rfrc;		/* Rx Frame Counter */
	u32 fmbm_rfbc;		/* Rx Bad Frames Counter */
	u32 fmbm_rlfc;		/* Rx Large Frames Counter */
	u32 fmbm_rffc;		/* Rx Filter Frames Counter */
	u32 fmbm_rfdc;		/* Rx Frame Discard Counter */
	u32 fmbm_rfldec;		/* Rx Frames List DMA Error Counter */
	u32 fmbm_rodc;		/* Rx Out of Buffers Discard Counter */
	u32 fmbm_rbdc;		/* Rx Buffers Deallocate Counter */
	u32 fmbm_rpec;		/* Rx Prepare to enqueue Counter */
	u32 reserved0224[0x16];	/* (0x224 0x27F) */
	u32 fmbm_rpc;		/* Rx Performance Counters */
	u32 fmbm_rpcp;		/* Rx Performance Count Parameters */
	u32 fmbm_rccn;		/* Rx Cycle Counter */
	u32 fmbm_rtuc;		/* Rx Tasks Utilization Counter */
	u32 fmbm_rrquc;		/* Rx Receive Queue Utilization cntr */
	u32 fmbm_rduc;		/* Rx DMA Utilization Counter */
	u32 fmbm_rfuc;		/* Rx FIFO Utilization Counter */
	u32 fmbm_rpac;		/* Rx Pause Activation Counter */
	u32 reserved02a0[0x18];	/* (0x2A0 0x2FF) */
	u32 fmbm_rdcfg[0x3];	/* Rx Debug Configuration */
	u32 fmbm_rgpr;		/* Rx General Purpose Register */
	u32 reserved0310[0x3a];
};
256 
257 /* BMI Tx port register map */
/* BMI Tx port register map.
 * Memory-mapped hardware layout: field order, sizes and reserved gaps are
 * fixed by the FMan BMI block and must not be changed.
 */
struct fman_port_tx_bmi_regs {
	u32 fmbm_tcfg;		/* Tx Configuration */
	u32 fmbm_tst;		/* Tx Status */
	u32 fmbm_tda;		/* Tx DMA attributes */
	u32 fmbm_tfp;		/* Tx FIFO Parameters */
	u32 fmbm_tfed;		/* Tx Frame End Data */
	u32 fmbm_ticp;		/* Tx Internal Context Parameters */
	u32 fmbm_tfdne;		/* Tx Frame Dequeue Next Engine. */
	u32 fmbm_tfca;		/* Tx Frame Command attribute. */
	u32 fmbm_tcfqid;	/* Tx Confirmation Frame Queue ID. */
	u32 fmbm_tefqid;	/* Tx Frame Error Queue ID */
	u32 fmbm_tfene;		/* Tx Frame Enqueue Next Engine */
	u32 fmbm_trlmts;	/* Tx Rate Limiter Scale */
	u32 fmbm_trlmt;		/* Tx Rate Limiter */
	u32 reserved0034[0x0e];	/* (0x034-0x6c) */
	u32 fmbm_tccb;		/* Tx Coarse Classification base */
	u32 fmbm_tfne;		/* Tx Frame Next Engine */
	u32 fmbm_tpfcm[0x02];
	/* Tx Priority based Flow Control (PFC) Mapping */
	u32 fmbm_tcmne;		/* Tx Frame Continuous Mode Next Engine */
	u32 reserved0080[0x60];	/* (0x080-0x200) */
	u32 fmbm_tstc;		/* Tx Statistics Counters */
	u32 fmbm_tfrc;		/* Tx Frame Counter */
	u32 fmbm_tfdc;		/* Tx Frames Discard Counter */
	u32 fmbm_tfledc;	/* Tx Frame length error discard counter */
	u32 fmbm_tfufdc;	/* Tx Frame unsupported format discard counter */
	u32 fmbm_tbdc;		/* Tx Buffers Deallocate Counter */
	u32 reserved0218[0x1A];	/* (0x218-0x280) */
	u32 fmbm_tpc;		/* Tx Performance Counters */
	u32 fmbm_tpcp;		/* Tx Performance Count Parameters */
	u32 fmbm_tccn;		/* Tx Cycle Counter */
	u32 fmbm_ttuc;		/* Tx Tasks Utilization Counter */
	u32 fmbm_ttcquc;	/* Tx Transmit conf Q util Counter */
	u32 fmbm_tduc;		/* Tx DMA Utilization Counter */
	u32 fmbm_tfuc;		/* Tx FIFO Utilization Counter */
	u32 reserved029c[16];	/* (0x29C-0x2FF) */
	u32 fmbm_tdcfg[0x3];	/* Tx Debug Configuration */
	u32 fmbm_tgpr;		/* Tx General Purpose Register */
	u32 reserved0310[0x3a]; /* (0x310-0x3FF) */
};
298 
299 /* BMI port register map */
/* BMI port register map.
 * A port is either Rx or Tx, so both register maps overlay the same
 * BMI register window.
 */
union fman_port_bmi_regs {
	struct fman_port_rx_bmi_regs rx;
	struct fman_port_tx_bmi_regs tx;
};
304 
305 /* QMI port register map */
/* QMI port register map.
 * Memory-mapped hardware layout; field order and reserved gaps are fixed.
 */
struct fman_port_qmi_regs {
	u32 fmqm_pnc;		/* PortID n Configuration Register */
	u32 fmqm_pns;		/* PortID n Status Register */
	u32 fmqm_pnts;		/* PortID n Task Status Register */
	u32 reserved00c[4];	/* 0xn00C - 0xn01B */
	u32 fmqm_pnen;		/* PortID n Enqueue NIA Register */
	u32 fmqm_pnetfc;		/* PortID n Enq Total Frame Counter */
	u32 reserved024[2];	/* 0xn024 - 0x02B */
	u32 fmqm_pndn;		/* PortID n Dequeue NIA Register */
	u32 fmqm_pndc;		/* PortID n Dequeue Config Register */
	u32 fmqm_pndtfc;		/* PortID n Dequeue tot Frame cntr */
	u32 fmqm_pndfdc;		/* PortID n Dequeue FQID Dflt Cntr */
	u32 fmqm_pndcc;		/* PortID n Dequeue Confirm Counter */
};
320 
/* HW Parser (HWP) defines and register map */
#define HWP_HXS_COUNT 16
#define HWP_HXS_PHE_REPORT 0x00000800
#define HWP_HXS_PCAC_PSTAT 0x00000100
#define HWP_HXS_PCAC_PSTOP 0x00000001
/* Memory-mapped hardware layout; field order and reserved gaps are fixed */
struct fman_port_hwp_regs {
	struct {
		u32 ssa; /* Soft Sequence Attachment */
		u32 lcv; /* Line-up Enable Confirmation Mask */
	} pmda[HWP_HXS_COUNT]; /* Parse Memory Direct Access Registers */
	u32 reserved080[(0x3f8 - 0x080) / 4]; /* (0x080-0x3f7) */
	u32 fmpr_pcac; /* Configuration Access Control */
};
333 
334 /* QMI dequeue prefetch modes */
/* QMI dequeue prefetch modes (programmed into FMQM_PNDC) */
enum fman_port_deq_prefetch {
	FMAN_PORT_DEQ_NO_PREFETCH, /* No prefetch mode */
	FMAN_PORT_DEQ_PART_PREFETCH, /* Partial prefetch mode */
	FMAN_PORT_DEQ_FULL_PREFETCH /* Full prefetch mode */
};
340 
341 /* A structure for defining FM port resources */
342 struct fman_port_rsrc {
343 	u32 num; /* Committed required resource */
344 	u32 extra; /* Extra (not committed) required resource */
345 };
346 
/* DMA byte-swap modes, written into the port's BMI DMA attributes */
enum fman_port_dma_swap {
	FMAN_PORT_DMA_NO_SWAP,	/* No swap, transfer data as is */
	FMAN_PORT_DMA_SWAP_LE,
	/* The transferred data should be swapped in PPC Little Endian mode */
	FMAN_PORT_DMA_SWAP_BE
	/* The transferred data should be swapped in Big Endian mode */
};
354 
355 /* Default port color */
356 enum fman_port_color {
357 	FMAN_PORT_COLOR_GREEN,	/* Default port color is green */
358 	FMAN_PORT_COLOR_YELLOW,	/* Default port color is yellow */
359 	FMAN_PORT_COLOR_RED,		/* Default port color is red */
360 	FMAN_PORT_COLOR_OVERRIDE	/* Ignore color */
361 };
362 
363 /* QMI dequeue from the SP channel - types */
364 enum fman_port_deq_type {
365 	FMAN_PORT_DEQ_BY_PRI,
366 	/* Priority precedence and Intra-Class scheduling */
367 	FMAN_PORT_DEQ_ACTIVE_FQ,
368 	/* Active FQ precedence and Intra-Class scheduling */
369 	FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS
370 	/* Active FQ precedence and override Intra-Class scheduling */
371 };
372 
373 /* External buffer pools configuration */
374 struct fman_port_bpools {
375 	u8 count;			/* Num of pools to set up */
376 	bool counters_enable;		/* Enable allocate counters */
377 	u8 grp_bp_depleted_num;
378 	/* Number of depleted pools - if reached the BMI indicates
379 	 * the MAC to send a pause frame
380 	 */
381 	struct {
382 		u8 bpid;		/* BM pool ID */
383 		u16 size;
384 		/* Pool's size - must be in ascending order */
385 		bool is_backup;
386 		/* If this is a backup pool */
387 		bool grp_bp_depleted;
388 		/* Consider this buffer in multiple pools depletion criteria */
389 		bool single_bp_depleted;
390 		/* Consider this buffer in single pool depletion criteria */
391 	} bpool[FMAN_PORT_MAX_EXT_POOLS_NUM];
392 };
393 
/* Driver-internal port configuration, consumed by init_bmi_rx()/
 * init_bmi_tx()/init_qmi() when the port registers are programmed.
 */
struct fman_port_cfg {
	u32 dflt_fqid;		/* Default (Rx) / confirmation (Tx) FQID */
	u32 err_fqid;		/* Error frame queue ID */
	u8 deq_sp;		/* QMI dequeue sub-portal */
	bool deq_high_priority;	/* Sets QMI_DEQ_CFG_PRI */
	enum fman_port_deq_type deq_type;
	enum fman_port_deq_prefetch deq_prefetch_option;
	u16 deq_byte_cnt;
	u8 cheksum_last_bytes_ignore;	/* (sic) frame-end CS ignore count */
	u8 rx_cut_end_bytes;	/* Bytes to cut from Rx frame end */
	struct fman_buf_pool_depletion buf_pool_depletion;
	struct fman_ext_pools ext_buf_pools;
	u32 tx_fifo_min_level;
	u32 tx_fifo_low_comf_level;
	u32 rx_pri_elevation;
	u32 rx_fifo_thr;
	struct fman_sp_buf_margins buf_margins;
	u32 int_buf_start_margin;
	struct fman_sp_int_context_data_copy int_context;
	u32 discard_mask;	/* Written to FMBM_RFSDM */
	u32 err_mask;		/* Written to FMBM_RFSEM */
	struct fman_buffer_prefix_content buffer_prefix_content;
	bool dont_release_buf;

	u8 rx_fd_bits;
	u32 tx_fifo_deq_pipeline_depth;
	bool errata_A006320;	/* Apply A006320 frame-end workaround */
	bool excessive_threshold_register;
	bool fmbm_tfne_has_features;

	enum fman_port_dma_swap dma_swap_data;
	enum fman_port_color color;
};
427 
/* Cached Rx buffer-pool figures, saved by set_ext_buffer_pools() and
 * later used e.g. for FIFO size verification.
 */
struct fman_port_rx_pools_params {
	u8 num_of_pools;
	u16 second_largest_buf_size;
	u16 largest_buf_size;
};
433 
/* Port parameters discovered from the device tree */
struct fman_port_dts_params {
	void __iomem *base_addr;	/* FMan port virtual memory */
	enum fman_port_type type;	/* Port type */
	u16 speed;			/* Port speed */
	u8 id;				/* HW Port Id */
	u32 qman_channel_id;		/* QMan channel id (non RX only) */
	struct fman *fman;		/* FMan Handle */
};
442 
/* Per-port driver state: register mappings, resources and configuration.
 * cfg points to the pending configuration and is expected to be released
 * once initialization completes (see is_init_done()).
 */
struct fman_port {
	void *fm;
	struct device *dev;
	struct fman_rev_info rev_info;
	u8 port_id;
	enum fman_port_type port_type;
	u16 port_speed;

	/* Mapped register sub-blocks of this port */
	union fman_port_bmi_regs __iomem *bmi_regs;
	struct fman_port_qmi_regs __iomem *qmi_regs;
	struct fman_port_hwp_regs __iomem *hwp_regs;

	struct fman_sp_buffer_offsets buffer_offsets;

	u8 internal_buf_offset;
	struct fman_ext_pools ext_buf_pools;

	u16 max_frame_length;
	struct fman_port_rsrc open_dmas;
	struct fman_port_rsrc tasks;
	struct fman_port_rsrc fifo_bufs;
	struct fman_port_rx_pools_params rx_pools_params;

	struct fman_port_cfg *cfg;
	struct fman_port_dts_params dts_params;

	/* Limits read back from the FMan at probe time */
	u8 ext_pools_num;
	u32 max_port_fifo_size;
	u32 max_num_of_ext_pools;
	u32 max_num_of_sub_portals;
	u32 bm_max_num_of_pools;
};
475 
/* init_bmi_rx() - program the BMI register block of an Rx port
 *
 * Writes, in order: DMA attributes, FIFO thresholds, frame-end data,
 * internal-context copy parameters, buffer margins, frame command
 * attributes, the next-engine (NIA) registers and the default/error
 * frame queue IDs, all taken from port->cfg.
 *
 * Always returns 0; the int return keeps the call pattern uniform with
 * init_bmi_tx()/init_qmi() in init().
 */
static int init_bmi_rx(struct fman_port *port)
{
	struct fman_port_rx_bmi_regs __iomem *regs = &port->bmi_regs->rx;
	struct fman_port_cfg *cfg = port->cfg;
	u32 tmp;

	/* DMA attributes */
	tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
	/* Enable write optimization */
	tmp |= BMI_DMA_ATTR_WRITE_OPTIMIZE;
	iowrite32be(tmp, &regs->fmbm_rda);

	/* Rx FIFO parameters: both fields are in FIFO units, minus one */
	tmp = (cfg->rx_pri_elevation / PORT_BMI_FIFO_UNITS - 1) <<
		BMI_RX_FIFO_PRI_ELEVATION_SHIFT;
	tmp |= cfg->rx_fifo_thr / PORT_BMI_FIFO_UNITS - 1;
	iowrite32be(tmp, &regs->fmbm_rfp);

	if (cfg->excessive_threshold_register)
		/* always allow access to the extra resources */
		iowrite32be(BMI_RX_FIFO_THRESHOLD_ETHE, &regs->fmbm_reth);

	/* Frame end data */
	tmp = (cfg->cheksum_last_bytes_ignore & BMI_FRAME_END_CS_IGNORE_MASK) <<
		BMI_FRAME_END_CS_IGNORE_SHIFT;
	tmp |= (cfg->rx_cut_end_bytes & BMI_RX_FRAME_END_CUT_MASK) <<
		BMI_RX_FRAME_END_CUT_SHIFT;
	if (cfg->errata_A006320)
		/* A006320 workaround: clear the cut-end-bytes field
		 * (bits 16-20) just written above
		 */
		tmp &= 0xffe0ffff;
	iowrite32be(tmp, &regs->fmbm_rfed);

	/* Internal context parameters (all in PORT_IC_OFFSET_UNITS) */
	tmp = ((cfg->int_context.ext_buf_offset / PORT_IC_OFFSET_UNITS) &
		BMI_IC_TO_EXT_MASK) << BMI_IC_TO_EXT_SHIFT;
	tmp |= ((cfg->int_context.int_context_offset / PORT_IC_OFFSET_UNITS) &
		BMI_IC_FROM_INT_MASK) << BMI_IC_FROM_INT_SHIFT;
	tmp |= (cfg->int_context.size / PORT_IC_OFFSET_UNITS) &
		BMI_IC_SIZE_MASK;
	iowrite32be(tmp, &regs->fmbm_ricp);

	/* Internal buffer offset */
	tmp = ((cfg->int_buf_start_margin / PORT_IC_OFFSET_UNITS) &
		BMI_INT_BUF_MARG_MASK) << BMI_INT_BUF_MARG_SHIFT;
	iowrite32be(tmp, &regs->fmbm_rim);

	/* External buffer margins */
	tmp = (cfg->buf_margins.start_margins & BMI_EXT_BUF_MARG_START_MASK) <<
		BMI_EXT_BUF_MARG_START_SHIFT;
	tmp |= cfg->buf_margins.end_margins & BMI_EXT_BUF_MARG_END_MASK;
	iowrite32be(tmp, &regs->fmbm_rebm);

	/* Frame attributes: default Rx error-discard bits, ordering,
	 * default color
	 */
	tmp = BMI_CMD_RX_MR_DEF;
	tmp |= BMI_CMD_ATTR_ORDER;
	tmp |= (u32)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
	/* Synchronization request */
	tmp |= BMI_CMD_ATTR_SYNC;

	iowrite32be(tmp, &regs->fmbm_rfca);

	/* NIA: received frames go to the HW Parser next */
	tmp = (u32)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT;

	tmp |= NIA_ENG_HWP;
	iowrite32be(tmp, &regs->fmbm_rfne);

	/* Parser Next Engine NIA: back to BMI for frame enqueue */
	iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME, &regs->fmbm_rfpne);

	/* Enqueue NIA */
	iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_rfene);

	/* Default/error queues */
	iowrite32be((cfg->dflt_fqid & DFLT_FQ_ID), &regs->fmbm_rfqid);
	iowrite32be((cfg->err_fqid & DFLT_FQ_ID), &regs->fmbm_refqid);

	/* Discard/error masks */
	iowrite32be(cfg->discard_mask, &regs->fmbm_rfsdm);
	iowrite32be(cfg->err_mask, &regs->fmbm_rfsem);

	return 0;
}
558 
/* init_bmi_tx() - program the BMI register block of a Tx port
 *
 * Writes DMA attributes, FIFO levels, frame-end data, internal-context
 * copy parameters, frame command attributes, the dequeue/enqueue NIAs
 * and the confirmation/error frame queue IDs from port->cfg.
 *
 * When no default FQID is set and buffers must not be released
 * (dflt_fqid == 0 && dont_release_buf), the confirmation queue is forced
 * to DFLT_FQ_ID, the enqueue NIA is redirected to BMI TX_RELEASE and the
 * EBD enable bit is cleared again.
 *
 * Always returns 0; the int return keeps the call pattern uniform with
 * init_bmi_rx()/init_qmi() in init().
 */
static int init_bmi_tx(struct fman_port *port)
{
	struct fman_port_tx_bmi_regs __iomem *regs = &port->bmi_regs->tx;
	struct fman_port_cfg *cfg = port->cfg;
	u32 tmp;

	/* Tx Configuration register */
	tmp = 0;
	iowrite32be(tmp, &regs->fmbm_tcfg);

	/* DMA attributes */
	tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
	iowrite32be(tmp, &regs->fmbm_tda);

	/* Tx FIFO parameters (levels in FIFO units) */
	tmp = (cfg->tx_fifo_min_level / PORT_BMI_FIFO_UNITS) <<
		BMI_TX_FIFO_MIN_FILL_SHIFT;
	tmp |= ((cfg->tx_fifo_deq_pipeline_depth - 1) &
		BMI_FIFO_PIPELINE_DEPTH_MASK) << BMI_FIFO_PIPELINE_DEPTH_SHIFT;
	tmp |= (cfg->tx_fifo_low_comf_level / PORT_BMI_FIFO_UNITS) - 1;
	iowrite32be(tmp, &regs->fmbm_tfp);

	/* Frame end data */
	tmp = (cfg->cheksum_last_bytes_ignore & BMI_FRAME_END_CS_IGNORE_MASK) <<
		BMI_FRAME_END_CS_IGNORE_SHIFT;
	iowrite32be(tmp, &regs->fmbm_tfed);

	/* Internal context parameters (all in PORT_IC_OFFSET_UNITS) */
	tmp = ((cfg->int_context.ext_buf_offset / PORT_IC_OFFSET_UNITS) &
		BMI_IC_TO_EXT_MASK) << BMI_IC_TO_EXT_SHIFT;
	tmp |= ((cfg->int_context.int_context_offset / PORT_IC_OFFSET_UNITS) &
		BMI_IC_FROM_INT_MASK) << BMI_IC_FROM_INT_SHIFT;
	tmp |= (cfg->int_context.size / PORT_IC_OFFSET_UNITS) &
		BMI_IC_SIZE_MASK;
	iowrite32be(tmp, &regs->fmbm_ticp);

	/* Frame attributes */
	tmp = BMI_CMD_TX_MR_DEF;
	tmp |= BMI_CMD_ATTR_ORDER;
	tmp |= (u32)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
	iowrite32be(tmp, &regs->fmbm_tfca);

	/* Dequeue NIA + enqueue NIA */
	iowrite32be(NIA_ENG_QMI_DEQ, &regs->fmbm_tfdne);
	iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_tfene);
	if (cfg->fmbm_tfne_has_features)
		/* enable external buffer discard (EBD) only when there is
		 * no default FQID
		 */
		iowrite32be(!cfg->dflt_fqid ?
			    BMI_EBD_EN | NIA_BMI_AC_FETCH_ALL_FRAME :
			    NIA_BMI_AC_FETCH_ALL_FRAME, &regs->fmbm_tfne);
	if (!cfg->dflt_fqid && cfg->dont_release_buf) {
		iowrite32be(DFLT_FQ_ID, &regs->fmbm_tcfqid);
		iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
			    &regs->fmbm_tfene);
		if (cfg->fmbm_tfne_has_features)
			iowrite32be(ioread32be(&regs->fmbm_tfne) & ~BMI_EBD_EN,
				    &regs->fmbm_tfne);
	}

	/* Confirmation/error queues */
	if (cfg->dflt_fqid || !cfg->dont_release_buf)
		iowrite32be(cfg->dflt_fqid & DFLT_FQ_ID, &regs->fmbm_tcfqid);
	iowrite32be((cfg->err_fqid & DFLT_FQ_ID), &regs->fmbm_tefqid);

	return 0;
}
624 
/* init_qmi() - program the QMI register block of a port
 *
 * Rx ports only get their enqueue NIA set. Tx ports additionally get a
 * dequeue NIA and a full dequeue configuration (priority, dequeue type,
 * prefetch mode, sub-portal and byte count) built from port->cfg.
 *
 * Return: 0 on success, -EINVAL if cfg holds an unknown dequeue type or
 * prefetch option.
 */
static int init_qmi(struct fman_port *port)
{
	struct fman_port_qmi_regs __iomem *regs = port->qmi_regs;
	struct fman_port_cfg *cfg = port->cfg;
	u32 tmp;

	/* Rx port configuration */
	if (port->port_type == FMAN_PORT_TYPE_RX) {
		/* Enqueue NIA */
		iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_RELEASE, &regs->fmqm_pnen);
		return 0;
	}

	/* Continue with Tx port configuration */
	if (port->port_type == FMAN_PORT_TYPE_TX) {
		/* Enqueue NIA */
		iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
			    &regs->fmqm_pnen);
		/* Dequeue NIA */
		iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX, &regs->fmqm_pndn);
	}

	/* Dequeue Configuration register */
	tmp = 0;
	if (cfg->deq_high_priority)
		tmp |= QMI_DEQ_CFG_PRI;

	switch (cfg->deq_type) {
	case FMAN_PORT_DEQ_BY_PRI:
		tmp |= QMI_DEQ_CFG_TYPE1;
		break;
	case FMAN_PORT_DEQ_ACTIVE_FQ:
		tmp |= QMI_DEQ_CFG_TYPE2;
		break;
	case FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS:
		tmp |= QMI_DEQ_CFG_TYPE3;
		break;
	default:
		return -EINVAL;
	}

	switch (cfg->deq_prefetch_option) {
	case FMAN_PORT_DEQ_NO_PREFETCH:
		break;
	case FMAN_PORT_DEQ_PART_PREFETCH:
		tmp |= QMI_DEQ_CFG_PREFETCH_PARTIAL;
		break;
	case FMAN_PORT_DEQ_FULL_PREFETCH:
		tmp |= QMI_DEQ_CFG_PREFETCH_FULL;
		break;
	default:
		return -EINVAL;
	}

	tmp |= (cfg->deq_sp & QMI_DEQ_CFG_SP_MASK) << QMI_DEQ_CFG_SP_SHIFT;
	tmp |= cfg->deq_byte_cnt;
	iowrite32be(tmp, &regs->fmqm_pndc);

	return 0;
}
685 
686 static void stop_port_hwp(struct fman_port *port)
687 {
688 	struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
689 	int cnt = 100;
690 
691 	iowrite32be(HWP_HXS_PCAC_PSTOP, &regs->fmpr_pcac);
692 
693 	while (cnt-- > 0 &&
694 	       (ioread32be(&regs->fmpr_pcac) & HWP_HXS_PCAC_PSTAT))
695 		udelay(10);
696 	if (!cnt)
697 		pr_err("Timeout stopping HW Parser\n");
698 }
699 
700 static void start_port_hwp(struct fman_port *port)
701 {
702 	struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
703 	int cnt = 100;
704 
705 	iowrite32be(0, &regs->fmpr_pcac);
706 
707 	while (cnt-- > 0 &&
708 	       !(ioread32be(&regs->fmpr_pcac) & HWP_HXS_PCAC_PSTAT))
709 		udelay(10);
710 	if (!cnt)
711 		pr_err("Timeout starting HW Parser\n");
712 }
713 
714 static void init_hwp(struct fman_port *port)
715 {
716 	struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
717 	int i;
718 
719 	stop_port_hwp(port);
720 
721 	for (i = 0; i < HWP_HXS_COUNT; i++) {
722 		/* enable HXS error reporting into FD[STATUS] PHE */
723 		iowrite32be(0x00000000, &regs->pmda[i].ssa);
724 		iowrite32be(0xffffffff, &regs->pmda[i].lcv);
725 	}
726 
727 	start_port_hwp(port);
728 }
729 
730 static int init(struct fman_port *port)
731 {
732 	int err;
733 
734 	/* Init BMI registers */
735 	switch (port->port_type) {
736 	case FMAN_PORT_TYPE_RX:
737 		err = init_bmi_rx(port);
738 		if (!err)
739 			init_hwp(port);
740 		break;
741 	case FMAN_PORT_TYPE_TX:
742 		err = init_bmi_tx(port);
743 		break;
744 	default:
745 		return -EINVAL;
746 	}
747 
748 	if (err)
749 		return err;
750 
751 	/* Init QMI registers */
752 	err = init_qmi(port);
753 	if (err)
754 		return err;
755 
756 	return 0;
757 }
758 
/* set_bpools() - program the port's external buffer pools
 *
 * Writes one FMBM_EBMPI entry per configured pool (valid bit, pool ID,
 * and for Rx ports the size plus counter/backup flags), clears the
 * remaining pool entries, then builds and writes the FMBM_MPD pool
 * depletion mask.
 *
 * Return: 0 on success, -EINVAL for a non-Rx port (only Rx ports carry
 * buffer pools here) or when pool sizes are not in ascending order.
 */
static int set_bpools(const struct fman_port *port,
		      const struct fman_port_bpools *bp)
{
	u32 __iomem *bp_reg, *bp_depl_reg;
	u32 tmp;
	u8 i, max_bp_num;
	bool grp_depl_used = false, rx_port;

	switch (port->port_type) {
	case FMAN_PORT_TYPE_RX:
		max_bp_num = port->ext_pools_num;
		rx_port = true;
		bp_reg = port->bmi_regs->rx.fmbm_ebmpi;
		bp_depl_reg = &port->bmi_regs->rx.fmbm_mpd;
		break;
	default:
		return -EINVAL;
	}

	if (rx_port) {
		/* Check buffers are provided in ascending order */
		for (i = 0; (i < (bp->count - 1) &&
			     (i < FMAN_PORT_MAX_EXT_POOLS_NUM - 1)); i++) {
			if (bp->bpool[i].size > bp->bpool[i + 1].size)
				return -EINVAL;
		}
	}

	/* Set up external buffers pools */
	for (i = 0; i < bp->count; i++) {
		tmp = BMI_EXT_BUF_POOL_VALID;
		tmp |= ((u32)bp->bpool[i].bpid <<
			BMI_EXT_BUF_POOL_ID_SHIFT) & BMI_EXT_BUF_POOL_ID_MASK;

		if (rx_port) {
			if (bp->counters_enable)
				tmp |= BMI_EXT_BUF_POOL_EN_COUNTER;

			if (bp->bpool[i].is_backup)
				tmp |= BMI_EXT_BUF_POOL_BACKUP;

			tmp |= (u32)bp->bpool[i].size;
		}

		iowrite32be(tmp, &bp_reg[i]);
	}

	/* Clear unused pools */
	for (i = bp->count; i < max_bp_num; i++)
		iowrite32be(0, &bp_reg[i]);

	/* Pools depletion: group-depletion flags occupy the top bits
	 * (0x80000000 >> i), single-pool flags the low byte (0x80 >> i)
	 */
	tmp = 0;
	for (i = 0; i < FMAN_PORT_MAX_EXT_POOLS_NUM; i++) {
		if (bp->bpool[i].grp_bp_depleted) {
			grp_depl_used = true;
			tmp |= 0x80000000 >> i;
		}

		if (bp->bpool[i].single_bp_depleted)
			tmp |= 0x80 >> i;
	}

	if (grp_depl_used)
		tmp |= ((u32)bp->grp_bp_depleted_num - 1) <<
		    BMI_POOL_DEP_NUM_OF_POOLS_SHIFT;

	iowrite32be(tmp, bp_depl_reg);
	return 0;
}
829 
/* is_init_done() - report whether the port's driver parameters were
 * already initialized; a NULL cfg means the pending configuration has
 * been consumed, i.e. init is done.
 */
static bool is_init_done(struct fman_port_cfg *cfg)
{
	return !cfg;
}
838 
/* verify_size_of_fifo() - sanity-check the port's configured FIFO size
 *
 * Computes the minimum FIFO size the port requires, and an optimal size
 * for back-to-back frame processing, from the max frame length, port
 * type, speed, FMan revision and (for Rx) the largest buffer-pool size.
 * Logs a debug hint when the configured fifo_bufs.num is below either
 * figure. Never fails; always returns 0.
 */
static int verify_size_of_fifo(struct fman_port *port)
{
	u32 min_fifo_size_required = 0, opt_fifo_size_for_b2b = 0;

	/* TX Ports */
	if (port->port_type == FMAN_PORT_TYPE_TX) {
		min_fifo_size_required = (u32)
		    (roundup(port->max_frame_length,
			     FMAN_BMI_FIFO_UNITS) + (3 * FMAN_BMI_FIFO_UNITS));

		/* the dequeue pipeline holds additional FIFO units */
		min_fifo_size_required +=
		    port->cfg->tx_fifo_deq_pipeline_depth *
		    FMAN_BMI_FIFO_UNITS;

		opt_fifo_size_for_b2b = min_fifo_size_required;

		/* Add some margin for back-to-back capability to improve
		 * performance, allows the hardware to pipeline new frame dma
		 * while the previous frame not yet transmitted.
		 */
		if (port->port_speed == 10000)
			opt_fifo_size_for_b2b += 3 * FMAN_BMI_FIFO_UNITS;
		else
			opt_fifo_size_for_b2b += 2 * FMAN_BMI_FIFO_UNITS;
	}

	/* RX Ports */
	else if (port->port_type == FMAN_PORT_TYPE_RX) {
		if (port->rev_info.major >= 6)
			min_fifo_size_required = (u32)
			(roundup(port->max_frame_length,
				 FMAN_BMI_FIFO_UNITS) +
				 (5 * FMAN_BMI_FIFO_UNITS));
			/* 4 according to spec + 1 for FOF>0 */
		else
			min_fifo_size_required = (u32)
			(roundup(min(port->max_frame_length,
				     port->rx_pools_params.largest_buf_size),
				     FMAN_BMI_FIFO_UNITS) +
				     (7 * FMAN_BMI_FIFO_UNITS));

		opt_fifo_size_for_b2b = min_fifo_size_required;

		/* Add some margin for back-to-back capability to improve
		 * performance,allows the hardware to pipeline new frame dma
		 * while the previous frame not yet transmitted.
		 */
		if (port->port_speed == 10000)
			opt_fifo_size_for_b2b += 8 * FMAN_BMI_FIFO_UNITS;
		else
			opt_fifo_size_for_b2b += 3 * FMAN_BMI_FIFO_UNITS;
	}

	/* u32 cannot be negative, so this effectively warns only when the
	 * size stayed 0, i.e. neither the Tx nor the Rx branch matched
	 */
	WARN_ON(min_fifo_size_required <= 0);
	WARN_ON(opt_fifo_size_for_b2b < min_fifo_size_required);

	/* Verify the size  */
	if (port->fifo_bufs.num < min_fifo_size_required)
		dev_dbg(port->dev, "%s: FIFO size should be enlarged to %d bytes\n",
			__func__, min_fifo_size_required);
	else if (port->fifo_bufs.num < opt_fifo_size_for_b2b)
		dev_dbg(port->dev, "%s: For b2b processing,FIFO may be enlarged to %d bytes\n",
			__func__, opt_fifo_size_for_b2b);

	return 0;
}
905 
906 static int set_ext_buffer_pools(struct fman_port *port)
907 {
908 	struct fman_ext_pools *ext_buf_pools = &port->cfg->ext_buf_pools;
909 	struct fman_buf_pool_depletion *buf_pool_depletion =
910 	&port->cfg->buf_pool_depletion;
911 	u8 ordered_array[FMAN_PORT_MAX_EXT_POOLS_NUM];
912 	u16 sizes_array[BM_MAX_NUM_OF_POOLS];
913 	int i = 0, j = 0, err;
914 	struct fman_port_bpools bpools;
915 
916 	memset(&ordered_array, 0, sizeof(u8) * FMAN_PORT_MAX_EXT_POOLS_NUM);
917 	memset(&sizes_array, 0, sizeof(u16) * BM_MAX_NUM_OF_POOLS);
918 	memcpy(&port->ext_buf_pools, ext_buf_pools,
919 	       sizeof(struct fman_ext_pools));
920 
921 	fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(ext_buf_pools,
922 							ordered_array,
923 							sizes_array);
924 
925 	memset(&bpools, 0, sizeof(struct fman_port_bpools));
926 	bpools.count = ext_buf_pools->num_of_pools_used;
927 	bpools.counters_enable = true;
928 	for (i = 0; i < ext_buf_pools->num_of_pools_used; i++) {
929 		bpools.bpool[i].bpid = ordered_array[i];
930 		bpools.bpool[i].size = sizes_array[ordered_array[i]];
931 	}
932 
933 	/* save pools parameters for later use */
934 	port->rx_pools_params.num_of_pools = ext_buf_pools->num_of_pools_used;
935 	port->rx_pools_params.largest_buf_size =
936 	    sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 1]];
937 	port->rx_pools_params.second_largest_buf_size =
938 	    sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 2]];
939 
940 	/* FMBM_RMPD reg. - pool depletion */
941 	if (buf_pool_depletion->pools_grp_mode_enable) {
942 		bpools.grp_bp_depleted_num = buf_pool_depletion->num_of_pools;
943 		for (i = 0; i < port->bm_max_num_of_pools; i++) {
944 			if (buf_pool_depletion->pools_to_consider[i]) {
945 				for (j = 0; j < ext_buf_pools->
946 				     num_of_pools_used; j++) {
947 					if (i == ordered_array[j]) {
948 						bpools.bpool[j].
949 						    grp_bp_depleted = true;
950 						break;
951 					}
952 				}
953 			}
954 		}
955 	}
956 
957 	if (buf_pool_depletion->single_pool_mode_enable) {
958 		for (i = 0; i < port->bm_max_num_of_pools; i++) {
959 			if (buf_pool_depletion->
960 			    pools_to_consider_for_single_mode[i]) {
961 				for (j = 0; j < ext_buf_pools->
962 				     num_of_pools_used; j++) {
963 					if (i == ordered_array[j]) {
964 						bpools.bpool[j].
965 						    single_bp_depleted = true;
966 						break;
967 					}
968 				}
969 			}
970 		}
971 	}
972 
973 	err = set_bpools(port, &bpools);
974 	if (err != 0) {
975 		dev_err(port->dev, "%s: set_bpools() failed\n", __func__);
976 		return -EINVAL;
977 	}
978 
979 	return 0;
980 }
981 
/* Finalize low-level configuration and program the port hardware.
 * Rounds the internal buffer offset up to an OFFSET_UNITS multiple,
 * then calls init() to write the BMI/QMI registers.
 *
 * Returns 0 on success, -ENODEV if the hardware init failed.
 */
static int init_low_level_driver(struct fman_port *port)
{
	struct fman_port_cfg *cfg = port->cfg;
	u32 tmp_val;

	switch (port->port_type) {
	case FMAN_PORT_TYPE_RX:
		/* Errors not in the discard mask are still enqueued */
		cfg->err_mask = (RX_ERRS_TO_ENQ & ~cfg->discard_mask);
		break;
	default:
		break;
	}

	/* Round internal_buf_offset up to a whole number of OFFSET_UNITS */
	tmp_val = (u32)((port->internal_buf_offset % OFFSET_UNITS) ?
		(port->internal_buf_offset / OFFSET_UNITS + 1) :
		(port->internal_buf_offset / OFFSET_UNITS));
	port->internal_buf_offset = (u8)(tmp_val * OFFSET_UNITS);
	port->cfg->int_buf_start_margin = port->internal_buf_offset;

	if (init(port) != 0) {
		dev_err(port->dev, "%s: fman port initialization failed\n",
			__func__);
		return -ENODEV;
	}

	/* The code below is a trick so the FM will not release the buffer
	 * to BM nor will try to enqueue the frame to QM
	 */
	if (port->port_type == FMAN_PORT_TYPE_TX) {
		if (!cfg->dflt_fqid && cfg->dont_release_buf) {
			/* override fmbm_tcfqid 0 with a false non-0 value.
			 * This will force FM to act according to tfene.
			 * Otherwise, if fmbm_tcfqid is 0 the FM will release
			 * buffers to BM regardless of fmbm_tfene
			 */
			iowrite32be(0xFFFFFF, &port->bmi_regs->tx.fmbm_tcfqid);
			iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
				    &port->bmi_regs->tx.fmbm_tfene);
		}
	}

	return 0;
}
1025 
1026 static int fill_soc_specific_params(struct fman_port *port)
1027 {
1028 	u32 bmi_max_fifo_size;
1029 
1030 	bmi_max_fifo_size = fman_get_bmi_max_fifo_size(port->fm);
1031 	port->max_port_fifo_size = MAX_PORT_FIFO_SIZE(bmi_max_fifo_size);
1032 	port->bm_max_num_of_pools = 64;
1033 
1034 	/* P4080 - Major 2
1035 	 * P2041/P3041/P5020/P5040 - Major 3
1036 	 * Tx/Bx - Major 6
1037 	 */
1038 	switch (port->rev_info.major) {
1039 	case 2:
1040 	case 3:
1041 		port->max_num_of_ext_pools		= 4;
1042 		port->max_num_of_sub_portals		= 12;
1043 		break;
1044 
1045 	case 6:
1046 		port->max_num_of_ext_pools		= 8;
1047 		port->max_num_of_sub_portals		= 16;
1048 		break;
1049 
1050 	default:
1051 		dev_err(port->dev, "%s: Unsupported FMan version\n", __func__);
1052 		return -EINVAL;
1053 	}
1054 
1055 	return 0;
1056 }
1057 
1058 static int get_dflt_fifo_deq_pipeline_depth(u8 major, enum fman_port_type type,
1059 					    u16 speed)
1060 {
1061 	switch (type) {
1062 	case FMAN_PORT_TYPE_RX:
1063 	case FMAN_PORT_TYPE_TX:
1064 		switch (speed) {
1065 		case 10000:
1066 			return 4;
1067 		case 1000:
1068 			if (major >= 6)
1069 				return 2;
1070 			else
1071 				return 1;
1072 		default:
1073 			return 0;
1074 		}
1075 	default:
1076 		return 0;
1077 	}
1078 }
1079 
1080 static int get_dflt_num_of_tasks(u8 major, enum fman_port_type type,
1081 				 u16 speed)
1082 {
1083 	switch (type) {
1084 	case FMAN_PORT_TYPE_RX:
1085 	case FMAN_PORT_TYPE_TX:
1086 		switch (speed) {
1087 		case 10000:
1088 			return 16;
1089 		case 1000:
1090 			if (major >= 6)
1091 				return 4;
1092 			else
1093 				return 3;
1094 		default:
1095 			return 0;
1096 		}
1097 	default:
1098 		return 0;
1099 	}
1100 }
1101 
1102 static int get_dflt_extra_num_of_tasks(u8 major, enum fman_port_type type,
1103 				       u16 speed)
1104 {
1105 	switch (type) {
1106 	case FMAN_PORT_TYPE_RX:
1107 		/* FMan V3 */
1108 		if (major >= 6)
1109 			return 0;
1110 
1111 		/* FMan V2 */
1112 		if (speed == 10000)
1113 			return 8;
1114 		else
1115 			return 2;
1116 	case FMAN_PORT_TYPE_TX:
1117 	default:
1118 		return 0;
1119 	}
1120 }
1121 
1122 static int get_dflt_num_of_open_dmas(u8 major, enum fman_port_type type,
1123 				     u16 speed)
1124 {
1125 	int val;
1126 
1127 	if (major >= 6) {
1128 		switch (type) {
1129 		case FMAN_PORT_TYPE_TX:
1130 			if (speed == 10000)
1131 				val = 12;
1132 			else
1133 				val = 3;
1134 			break;
1135 		case FMAN_PORT_TYPE_RX:
1136 			if (speed == 10000)
1137 				val = 8;
1138 			else
1139 				val = 2;
1140 			break;
1141 		default:
1142 			return 0;
1143 		}
1144 	} else {
1145 		switch (type) {
1146 		case FMAN_PORT_TYPE_TX:
1147 		case FMAN_PORT_TYPE_RX:
1148 			if (speed == 10000)
1149 				val = 8;
1150 			else
1151 				val = 1;
1152 			break;
1153 		default:
1154 			val = 0;
1155 		}
1156 	}
1157 
1158 	return val;
1159 }
1160 
1161 static int get_dflt_extra_num_of_open_dmas(u8 major, enum fman_port_type type,
1162 					   u16 speed)
1163 {
1164 	/* FMan V3 */
1165 	if (major >= 6)
1166 		return 0;
1167 
1168 	/* FMan V2 */
1169 	switch (type) {
1170 	case FMAN_PORT_TYPE_RX:
1171 	case FMAN_PORT_TYPE_TX:
1172 		if (speed == 10000)
1173 			return 8;
1174 		else
1175 			return 1;
1176 	default:
1177 		return 0;
1178 	}
1179 }
1180 
1181 static int get_dflt_num_of_fifo_bufs(u8 major, enum fman_port_type type,
1182 				     u16 speed)
1183 {
1184 	int val;
1185 
1186 	if (major >= 6) {
1187 		switch (type) {
1188 		case FMAN_PORT_TYPE_TX:
1189 			if (speed == 10000)
1190 				val = 64;
1191 			else
1192 				val = 50;
1193 			break;
1194 		case FMAN_PORT_TYPE_RX:
1195 			if (speed == 10000)
1196 				val = 96;
1197 			else
1198 				val = 50;
1199 			break;
1200 		default:
1201 			val = 0;
1202 		}
1203 	} else {
1204 		switch (type) {
1205 		case FMAN_PORT_TYPE_TX:
1206 			if (speed == 10000)
1207 				val = 48;
1208 			else
1209 				val = 44;
1210 			break;
1211 		case FMAN_PORT_TYPE_RX:
1212 			if (speed == 10000)
1213 				val = 48;
1214 			else
1215 				val = 45;
1216 			break;
1217 		default:
1218 			val = 0;
1219 		}
1220 	}
1221 
1222 	return val;
1223 }
1224 
1225 static void set_dflt_cfg(struct fman_port *port,
1226 			 struct fman_port_params *port_params)
1227 {
1228 	struct fman_port_cfg *cfg = port->cfg;
1229 
1230 	cfg->dma_swap_data = FMAN_PORT_DMA_NO_SWAP;
1231 	cfg->color = FMAN_PORT_COLOR_GREEN;
1232 	cfg->rx_cut_end_bytes = DFLT_PORT_CUT_BYTES_FROM_END;
1233 	cfg->rx_pri_elevation = BMI_PRIORITY_ELEVATION_LEVEL;
1234 	cfg->rx_fifo_thr = BMI_FIFO_THRESHOLD;
1235 	cfg->tx_fifo_low_comf_level = (5 * 1024);
1236 	cfg->deq_type = FMAN_PORT_DEQ_BY_PRI;
1237 	cfg->deq_prefetch_option = FMAN_PORT_DEQ_FULL_PREFETCH;
1238 	cfg->tx_fifo_deq_pipeline_depth =
1239 		BMI_DEQUEUE_PIPELINE_DEPTH(port->port_type, port->port_speed);
1240 	cfg->deq_byte_cnt = QMI_BYTE_COUNT_LEVEL_CONTROL(port->port_type);
1241 
1242 	cfg->rx_pri_elevation =
1243 		DFLT_PORT_RX_FIFO_PRI_ELEVATION_LEV(port->max_port_fifo_size);
1244 	port->cfg->rx_fifo_thr =
1245 		DFLT_PORT_RX_FIFO_THRESHOLD(port->rev_info.major,
1246 					    port->max_port_fifo_size);
1247 
1248 	if ((port->rev_info.major == 6) &&
1249 	    ((port->rev_info.minor == 0) || (port->rev_info.minor == 3)))
1250 		cfg->errata_A006320 = true;
1251 
1252 	/* Excessive Threshold register - exists for pre-FMv3 chips only */
1253 	if (port->rev_info.major < 6)
1254 		cfg->excessive_threshold_register = true;
1255 	else
1256 		cfg->fmbm_tfne_has_features = true;
1257 
1258 	cfg->buffer_prefix_content.data_align =
1259 		DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN;
1260 }
1261 
1262 static void set_rx_dflt_cfg(struct fman_port *port,
1263 			    struct fman_port_params *port_params)
1264 {
1265 	port->cfg->discard_mask = DFLT_PORT_ERRORS_TO_DISCARD;
1266 
1267 	memcpy(&port->cfg->ext_buf_pools,
1268 	       &port_params->specific_params.rx_params.ext_buf_pools,
1269 	       sizeof(struct fman_ext_pools));
1270 	port->cfg->err_fqid =
1271 		port_params->specific_params.rx_params.err_fqid;
1272 	port->cfg->dflt_fqid =
1273 		port_params->specific_params.rx_params.dflt_fqid;
1274 }
1275 
1276 static void set_tx_dflt_cfg(struct fman_port *port,
1277 			    struct fman_port_params *port_params,
1278 			    struct fman_port_dts_params *dts_params)
1279 {
1280 	port->cfg->tx_fifo_deq_pipeline_depth =
1281 		get_dflt_fifo_deq_pipeline_depth(port->rev_info.major,
1282 						 port->port_type,
1283 						 port->port_speed);
1284 	port->cfg->err_fqid =
1285 		port_params->specific_params.non_rx_params.err_fqid;
1286 	port->cfg->deq_sp =
1287 		(u8)(dts_params->qman_channel_id & QMI_DEQ_CFG_SUBPORTAL_MASK);
1288 	port->cfg->dflt_fqid =
1289 		port_params->specific_params.non_rx_params.dflt_fqid;
1290 	port->cfg->deq_high_priority = true;
1291 }
1292 
1293 /**
1294  * fman_port_config
1295  * @port:	Pointer to the port structure
1296  * @params:	Pointer to data structure of parameters
1297  *
1298  * Creates a descriptor for the FM PORT module.
1299  * The routine returns a pointer to the FM PORT object.
1300  * This descriptor must be passed as first parameter to all other FM PORT
1301  * function calls.
1302  * No actual initialization or configuration of FM hardware is done by this
1303  * routine.
1304  *
1305  * Return: 0 on success; Error code otherwise.
1306  */
1307 int fman_port_config(struct fman_port *port, struct fman_port_params *params)
1308 {
1309 	void __iomem *base_addr = port->dts_params.base_addr;
1310 	int err;
1311 
1312 	/* Allocate the FM driver's parameters structure */
1313 	port->cfg = kzalloc(sizeof(*port->cfg), GFP_KERNEL);
1314 	if (!port->cfg)
1315 		return -EINVAL;
1316 
1317 	/* Initialize FM port parameters which will be kept by the driver */
1318 	port->port_type = port->dts_params.type;
1319 	port->port_speed = port->dts_params.speed;
1320 	port->port_id = port->dts_params.id;
1321 	port->fm = port->dts_params.fman;
1322 	port->ext_pools_num = (u8)8;
1323 
1324 	/* get FM revision */
1325 	fman_get_revision(port->fm, &port->rev_info);
1326 
1327 	err = fill_soc_specific_params(port);
1328 	if (err)
1329 		goto err_port_cfg;
1330 
1331 	switch (port->port_type) {
1332 	case FMAN_PORT_TYPE_RX:
1333 		set_rx_dflt_cfg(port, params);
1334 	case FMAN_PORT_TYPE_TX:
1335 		set_tx_dflt_cfg(port, params, &port->dts_params);
1336 	default:
1337 		set_dflt_cfg(port, params);
1338 	}
1339 
1340 	/* Continue with other parameters */
1341 	/* set memory map pointers */
1342 	port->bmi_regs = base_addr + BMI_PORT_REGS_OFFSET;
1343 	port->qmi_regs = base_addr + QMI_PORT_REGS_OFFSET;
1344 	port->hwp_regs = base_addr + HWP_PORT_REGS_OFFSET;
1345 
1346 	port->max_frame_length = DFLT_PORT_MAX_FRAME_LENGTH;
1347 	/* resource distribution. */
1348 
1349 	port->fifo_bufs.num =
1350 	get_dflt_num_of_fifo_bufs(port->rev_info.major, port->port_type,
1351 				  port->port_speed) * FMAN_BMI_FIFO_UNITS;
1352 	port->fifo_bufs.extra =
1353 	DFLT_PORT_EXTRA_NUM_OF_FIFO_BUFS * FMAN_BMI_FIFO_UNITS;
1354 
1355 	port->open_dmas.num =
1356 	get_dflt_num_of_open_dmas(port->rev_info.major,
1357 				  port->port_type, port->port_speed);
1358 	port->open_dmas.extra =
1359 	get_dflt_extra_num_of_open_dmas(port->rev_info.major,
1360 					port->port_type, port->port_speed);
1361 	port->tasks.num =
1362 	get_dflt_num_of_tasks(port->rev_info.major,
1363 			      port->port_type, port->port_speed);
1364 	port->tasks.extra =
1365 	get_dflt_extra_num_of_tasks(port->rev_info.major,
1366 				    port->port_type, port->port_speed);
1367 
1368 	/* FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981 errata
1369 	 * workaround
1370 	 */
1371 	if ((port->rev_info.major == 6) && (port->rev_info.minor == 0) &&
1372 	    (((port->port_type == FMAN_PORT_TYPE_TX) &&
1373 	    (port->port_speed == 1000)))) {
1374 		port->open_dmas.num = 16;
1375 		port->open_dmas.extra = 0;
1376 	}
1377 
1378 	if (port->rev_info.major >= 6 &&
1379 	    port->port_type == FMAN_PORT_TYPE_TX &&
1380 	    port->port_speed == 1000) {
1381 		/* FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127 Errata
1382 		 * workaround
1383 		 */
1384 		if (port->rev_info.major >= 6) {
1385 			u32 reg;
1386 
1387 			reg = 0x00001013;
1388 			iowrite32be(reg, &port->bmi_regs->tx.fmbm_tfp);
1389 		}
1390 	}
1391 
1392 	return 0;
1393 
1394 err_port_cfg:
1395 	kfree(port->cfg);
1396 	return -EINVAL;
1397 }
1398 EXPORT_SYMBOL(fman_port_config);
1399 
1400 /**
1401  * fman_port_init
1402  * port:	A pointer to a FM Port module.
1403  * Initializes the FM PORT module by defining the software structure and
1404  * configuring the hardware registers.
1405  *
1406  * Return: 0 on success; Error code otherwise.
1407  */
1408 int fman_port_init(struct fman_port *port)
1409 {
1410 	struct fman_port_cfg *cfg;
1411 	int err;
1412 	struct fman_port_init_params params;
1413 
1414 	if (is_init_done(port->cfg))
1415 		return -EINVAL;
1416 
1417 	err = fman_sp_build_buffer_struct(&port->cfg->int_context,
1418 					  &port->cfg->buffer_prefix_content,
1419 					  &port->cfg->buf_margins,
1420 					  &port->buffer_offsets,
1421 					  &port->internal_buf_offset);
1422 	if (err)
1423 		return err;
1424 
1425 	cfg = port->cfg;
1426 
1427 	if (port->port_type == FMAN_PORT_TYPE_RX) {
1428 		/* Call the external Buffer routine which also checks fifo
1429 		 * size and updates it if necessary
1430 		 */
1431 		/* define external buffer pools and pool depletion */
1432 		err = set_ext_buffer_pools(port);
1433 		if (err)
1434 			return err;
1435 		/* check if the largest external buffer pool is large enough */
1436 		if (cfg->buf_margins.start_margins + MIN_EXT_BUF_SIZE +
1437 		    cfg->buf_margins.end_margins >
1438 		    port->rx_pools_params.largest_buf_size) {
1439 			dev_err(port->dev, "%s: buf_margins.start_margins (%d) + minimum buf size (64) + buf_margins.end_margins (%d) is larger than maximum external buffer size (%d)\n",
1440 				__func__, cfg->buf_margins.start_margins,
1441 				cfg->buf_margins.end_margins,
1442 				port->rx_pools_params.largest_buf_size);
1443 			return -EINVAL;
1444 		}
1445 	}
1446 
1447 	/* Call FM module routine for communicating parameters */
1448 	memset(&params, 0, sizeof(params));
1449 	params.port_id = port->port_id;
1450 	params.port_type = port->port_type;
1451 	params.port_speed = port->port_speed;
1452 	params.num_of_tasks = (u8)port->tasks.num;
1453 	params.num_of_extra_tasks = (u8)port->tasks.extra;
1454 	params.num_of_open_dmas = (u8)port->open_dmas.num;
1455 	params.num_of_extra_open_dmas = (u8)port->open_dmas.extra;
1456 
1457 	if (port->fifo_bufs.num) {
1458 		err = verify_size_of_fifo(port);
1459 		if (err)
1460 			return err;
1461 	}
1462 	params.size_of_fifo = port->fifo_bufs.num;
1463 	params.extra_size_of_fifo = port->fifo_bufs.extra;
1464 	params.deq_pipeline_depth = port->cfg->tx_fifo_deq_pipeline_depth;
1465 	params.max_frame_length = port->max_frame_length;
1466 
1467 	err = fman_set_port_params(port->fm, &params);
1468 	if (err)
1469 		return err;
1470 
1471 	err = init_low_level_driver(port);
1472 	if (err)
1473 		return err;
1474 
1475 	kfree(port->cfg);
1476 	port->cfg = NULL;
1477 
1478 	return 0;
1479 }
1480 EXPORT_SYMBOL(fman_port_init);
1481 
1482 /**
1483  * fman_port_cfg_buf_prefix_content
1484  * @port			A pointer to a FM Port module.
1485  * @buffer_prefix_content	A structure of parameters describing
1486  *				the structure of the buffer.
1487  *				Out parameter:
1488  *				Start margin - offset of data from
1489  *				start of external buffer.
1490  * Defines the structure, size and content of the application buffer.
1491  * The prefix, in Tx ports, if 'pass_prs_result', the application should set
1492  * a value to their offsets in the prefix of the FM will save the first
1493  * 'priv_data_size', than, depending on 'pass_prs_result' and
1494  * 'pass_time_stamp', copy parse result and timeStamp, and the packet itself
1495  * (in this order), to the application buffer, and to offset.
1496  * Calling this routine changes the buffer margins definitions in the internal
1497  * driver data base from its default configuration:
1498  * Data size:  [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PRIV_DATA_SIZE]
1499  * Pass Parser result: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_PRS_RESULT].
1500  * Pass timestamp: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_TIME_STAMP].
1501  * May be used for all ports
1502  *
1503  * Allowed only following fman_port_config() and before fman_port_init().
1504  *
1505  * Return: 0 on success; Error code otherwise.
1506  */
1507 int fman_port_cfg_buf_prefix_content(struct fman_port *port,
1508 				     struct fman_buffer_prefix_content *
1509 				     buffer_prefix_content)
1510 {
1511 	if (is_init_done(port->cfg))
1512 		return -EINVAL;
1513 
1514 	memcpy(&port->cfg->buffer_prefix_content,
1515 	       buffer_prefix_content,
1516 	       sizeof(struct fman_buffer_prefix_content));
1517 	/* if data_align was not initialized by user,
1518 	 * we return to driver's default
1519 	 */
1520 	if (!port->cfg->buffer_prefix_content.data_align)
1521 		port->cfg->buffer_prefix_content.data_align =
1522 		DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN;
1523 
1524 	return 0;
1525 }
1526 EXPORT_SYMBOL(fman_port_cfg_buf_prefix_content);
1527 
1528 /**
1529  * fman_port_disable
1530  * port:	A pointer to a FM Port module.
1531  *
1532  * Gracefully disable an FM port. The port will not start new	tasks after all
1533  * tasks associated with the port are terminated.
1534  *
1535  * This is a blocking routine, it returns after port is gracefully stopped,
1536  * i.e. the port will not except new frames, but it will finish all frames
1537  * or tasks which were already began.
1538  * Allowed only following fman_port_init().
1539  *
1540  * Return: 0 on success; Error code otherwise.
1541  */
1542 int fman_port_disable(struct fman_port *port)
1543 {
1544 	u32 __iomem *bmi_cfg_reg, *bmi_status_reg;
1545 	u32 tmp;
1546 	bool rx_port, failure = false;
1547 	int count;
1548 
1549 	if (!is_init_done(port->cfg))
1550 		return -EINVAL;
1551 
1552 	switch (port->port_type) {
1553 	case FMAN_PORT_TYPE_RX:
1554 		bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg;
1555 		bmi_status_reg = &port->bmi_regs->rx.fmbm_rst;
1556 		rx_port = true;
1557 		break;
1558 	case FMAN_PORT_TYPE_TX:
1559 		bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg;
1560 		bmi_status_reg = &port->bmi_regs->tx.fmbm_tst;
1561 		rx_port = false;
1562 		break;
1563 	default:
1564 		return -EINVAL;
1565 	}
1566 
1567 	/* Disable QMI */
1568 	if (!rx_port) {
1569 		tmp = ioread32be(&port->qmi_regs->fmqm_pnc) & ~QMI_PORT_CFG_EN;
1570 		iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);
1571 
1572 		/* Wait for QMI to finish FD handling */
1573 		count = 100;
1574 		do {
1575 			udelay(10);
1576 			tmp = ioread32be(&port->qmi_regs->fmqm_pns);
1577 		} while ((tmp & QMI_PORT_STATUS_DEQ_FD_BSY) && --count);
1578 
1579 		if (count == 0) {
1580 			/* Timeout */
1581 			failure = true;
1582 		}
1583 	}
1584 
1585 	/* Disable BMI */
1586 	tmp = ioread32be(bmi_cfg_reg) & ~BMI_PORT_CFG_EN;
1587 	iowrite32be(tmp, bmi_cfg_reg);
1588 
1589 	/* Wait for graceful stop end */
1590 	count = 500;
1591 	do {
1592 		udelay(10);
1593 		tmp = ioread32be(bmi_status_reg);
1594 	} while ((tmp & BMI_PORT_STATUS_BSY) && --count);
1595 
1596 	if (count == 0) {
1597 		/* Timeout */
1598 		failure = true;
1599 	}
1600 
1601 	if (failure)
1602 		dev_dbg(port->dev, "%s: FMan Port[%d]: BMI or QMI is Busy. Port forced down\n",
1603 			__func__,  port->port_id);
1604 
1605 	return 0;
1606 }
1607 EXPORT_SYMBOL(fman_port_disable);
1608 
1609 /**
1610  * fman_port_enable
1611  * port:	A pointer to a FM Port module.
1612  *
1613  * A runtime routine provided to allow disable/enable of port.
1614  *
1615  * Allowed only following fman_port_init().
1616  *
1617  * Return: 0 on success; Error code otherwise.
1618  */
1619 int fman_port_enable(struct fman_port *port)
1620 {
1621 	u32 __iomem *bmi_cfg_reg;
1622 	u32 tmp;
1623 	bool rx_port;
1624 
1625 	if (!is_init_done(port->cfg))
1626 		return -EINVAL;
1627 
1628 	switch (port->port_type) {
1629 	case FMAN_PORT_TYPE_RX:
1630 		bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg;
1631 		rx_port = true;
1632 		break;
1633 	case FMAN_PORT_TYPE_TX:
1634 		bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg;
1635 		rx_port = false;
1636 		break;
1637 	default:
1638 		return -EINVAL;
1639 	}
1640 
1641 	/* Enable QMI */
1642 	if (!rx_port) {
1643 		tmp = ioread32be(&port->qmi_regs->fmqm_pnc) | QMI_PORT_CFG_EN;
1644 		iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);
1645 	}
1646 
1647 	/* Enable BMI */
1648 	tmp = ioread32be(bmi_cfg_reg) | BMI_PORT_CFG_EN;
1649 	iowrite32be(tmp, bmi_cfg_reg);
1650 
1651 	return 0;
1652 }
1653 EXPORT_SYMBOL(fman_port_enable);
1654 
1655 /**
1656  * fman_port_bind
1657  * dev:		FMan Port OF device pointer
1658  *
1659  * Bind to a specific FMan Port.
1660  *
1661  * Allowed only after the port was created.
1662  *
1663  * Return: A pointer to the FMan port device.
1664  */
1665 struct fman_port *fman_port_bind(struct device *dev)
1666 {
1667 	return (struct fman_port *)(dev_get_drvdata(get_device(dev)));
1668 }
1669 EXPORT_SYMBOL(fman_port_bind);
1670 
1671 /**
1672  * fman_port_get_qman_channel_id
1673  * port:	Pointer to the FMan port devuce
1674  *
1675  * Get the QMan channel ID for the specific port
1676  *
1677  * Return: QMan channel ID
1678  */
1679 u32 fman_port_get_qman_channel_id(struct fman_port *port)
1680 {
1681 	return port->dts_params.qman_channel_id;
1682 }
1683 EXPORT_SYMBOL(fman_port_get_qman_channel_id);
1684 
1685 static int fman_port_probe(struct platform_device *of_dev)
1686 {
1687 	struct fman_port *port;
1688 	struct fman *fman;
1689 	struct device_node *fm_node, *port_node;
1690 	struct resource res;
1691 	struct resource *dev_res;
1692 	u32 val;
1693 	int err = 0, lenp;
1694 	enum fman_port_type port_type;
1695 	u16 port_speed;
1696 	u8 port_id;
1697 
1698 	port = kzalloc(sizeof(*port), GFP_KERNEL);
1699 	if (!port)
1700 		return -ENOMEM;
1701 
1702 	port->dev = &of_dev->dev;
1703 
1704 	port_node = of_node_get(of_dev->dev.of_node);
1705 
1706 	/* Get the FM node */
1707 	fm_node = of_get_parent(port_node);
1708 	if (!fm_node) {
1709 		dev_err(port->dev, "%s: of_get_parent() failed\n", __func__);
1710 		err = -ENODEV;
1711 		goto return_err;
1712 	}
1713 
1714 	fman = dev_get_drvdata(&of_find_device_by_node(fm_node)->dev);
1715 	of_node_put(fm_node);
1716 	if (!fman) {
1717 		err = -EINVAL;
1718 		goto return_err;
1719 	}
1720 
1721 	err = of_property_read_u32(port_node, "cell-index", &val);
1722 	if (err) {
1723 		dev_err(port->dev, "%s: reading cell-index for %s failed\n",
1724 			__func__, port_node->full_name);
1725 		err = -EINVAL;
1726 		goto return_err;
1727 	}
1728 	port_id = (u8)val;
1729 	port->dts_params.id = port_id;
1730 
1731 	if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) {
1732 		port_type = FMAN_PORT_TYPE_TX;
1733 		port_speed = 1000;
1734 		if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
1735 			port_speed = 10000;
1736 
1737 	} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) {
1738 		if (port_id >= TX_10G_PORT_BASE)
1739 			port_speed = 10000;
1740 		else
1741 			port_speed = 1000;
1742 		port_type = FMAN_PORT_TYPE_TX;
1743 
1744 	} else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) {
1745 		port_type = FMAN_PORT_TYPE_RX;
1746 		port_speed = 1000;
1747 		if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
1748 			port_speed = 10000;
1749 
1750 	} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) {
1751 		if (port_id >= RX_10G_PORT_BASE)
1752 			port_speed = 10000;
1753 		else
1754 			port_speed = 1000;
1755 		port_type = FMAN_PORT_TYPE_RX;
1756 
1757 	}  else {
1758 		dev_err(port->dev, "%s: Illegal port type\n", __func__);
1759 		err = -EINVAL;
1760 		goto return_err;
1761 	}
1762 
1763 	port->dts_params.type = port_type;
1764 	port->dts_params.speed = port_speed;
1765 
1766 	if (port_type == FMAN_PORT_TYPE_TX) {
1767 		u32 qman_channel_id;
1768 
1769 		qman_channel_id = fman_get_qman_channel_id(fman, port_id);
1770 		if (qman_channel_id == 0) {
1771 			dev_err(port->dev, "%s: incorrect qman-channel-id\n",
1772 				__func__);
1773 			err = -EINVAL;
1774 			goto return_err;
1775 		}
1776 		port->dts_params.qman_channel_id = qman_channel_id;
1777 	}
1778 
1779 	err = of_address_to_resource(port_node, 0, &res);
1780 	if (err < 0) {
1781 		dev_err(port->dev, "%s: of_address_to_resource() failed\n",
1782 			__func__);
1783 		err = -ENOMEM;
1784 		goto return_err;
1785 	}
1786 
1787 	port->dts_params.fman = fman;
1788 
1789 	of_node_put(port_node);
1790 
1791 	dev_res = __devm_request_region(port->dev, &res, res.start,
1792 					resource_size(&res), "fman-port");
1793 	if (!dev_res) {
1794 		dev_err(port->dev, "%s: __devm_request_region() failed\n",
1795 			__func__);
1796 		err = -EINVAL;
1797 		goto free_port;
1798 	}
1799 
1800 	port->dts_params.base_addr = devm_ioremap(port->dev, res.start,
1801 						  resource_size(&res));
1802 	if (!port->dts_params.base_addr)
1803 		dev_err(port->dev, "%s: devm_ioremap() failed\n", __func__);
1804 
1805 	dev_set_drvdata(&of_dev->dev, port);
1806 
1807 	return 0;
1808 
1809 return_err:
1810 	of_node_put(port_node);
1811 free_port:
1812 	kfree(port);
1813 	return err;
1814 }
1815 
/* Device-tree compatibles handled by this driver: Rx and Tx ports of
 * FMan v2 and v3.
 */
static const struct of_device_id fman_port_match[] = {
	{.compatible = "fsl,fman-v3-port-rx"},
	{.compatible = "fsl,fman-v2-port-rx"},
	{.compatible = "fsl,fman-v3-port-tx"},
	{.compatible = "fsl,fman-v2-port-tx"},
	{}
};

MODULE_DEVICE_TABLE(of, fman_port_match);
1825 
/* Platform driver glue; note there is no .remove callback, so a bound
 * port stays registered for the lifetime of the module.
 */
static struct platform_driver fman_port_driver = {
	.driver = {
		.name = "fsl-fman-port",
		.of_match_table = fman_port_match,
	},
	.probe = fman_port_probe,
};
1833 
1834 static int __init fman_port_load(void)
1835 {
1836 	int err;
1837 
1838 	pr_debug("FSL DPAA FMan driver\n");
1839 
1840 	err = platform_driver_register(&fman_port_driver);
1841 	if (err < 0)
1842 		pr_err("Error, platform_driver_register() = %d\n", err);
1843 
1844 	return err;
1845 }
1846 module_init(fman_port_load);
1847 
/* Module exit point: unregister the FMan port platform driver. */
static void __exit fman_port_unload(void)
{
	platform_driver_unregister(&fman_port_driver);
}
module_exit(fman_port_unload);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Freescale DPAA Frame Manager Port driver");
1856