xref: /openbmc/u-boot/drivers/net/pfe_eth/pfe_hw.c (revision a4a40437)
/*
 * Copyright 2015-2016 Freescale Semiconductor, Inc.
 * Copyright 2017 NXP
 *
 * SPDX-License-Identifier: GPL-2.0+
 */
#include <net/pfe_eth/pfe_eth.h>
#include <net/pfe_eth/pfe/pfe_hw.h>

static struct pe_info pe[MAX_PE];

/*
 * Initializes the PFE library.
 * Must be called before using any of the library functions.
 */
void pfe_lib_init(void)
{
	int pfe_pe_id;

	for (pfe_pe_id = CLASS0_ID; pfe_pe_id <= CLASS_MAX_ID; pfe_pe_id++) {
		pe[pfe_pe_id].dmem_base_addr =
			(u32)CLASS_DMEM_BASE_ADDR(pfe_pe_id);
		pe[pfe_pe_id].pmem_base_addr =
			(u32)CLASS_IMEM_BASE_ADDR(pfe_pe_id);
		pe[pfe_pe_id].pmem_size = (u32)CLASS_IMEM_SIZE;
		pe[pfe_pe_id].mem_access_wdata =
			(void *)CLASS_MEM_ACCESS_WDATA;
		pe[pfe_pe_id].mem_access_addr = (void *)CLASS_MEM_ACCESS_ADDR;
		pe[pfe_pe_id].mem_access_rdata = (void *)CLASS_MEM_ACCESS_RDATA;
	}

	for (pfe_pe_id = TMU0_ID; pfe_pe_id <= TMU_MAX_ID; pfe_pe_id++) {
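		/* TMU2 is not used on this platform; skip it */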
		if (pfe_pe_id == TMU2_ID)
			continue;
		pe[pfe_pe_id].dmem_base_addr =
			(u32)TMU_DMEM_BASE_ADDR(pfe_pe_id - TMU0_ID);
		pe[pfe_pe_id].pmem_base_addr =
			(u32)TMU_IMEM_BASE_ADDR(pfe_pe_id - TMU0_ID);
		pe[pfe_pe_id].pmem_size = (u32)TMU_IMEM_SIZE;
		pe[pfe_pe_id].mem_access_wdata = (void *)TMU_MEM_ACCESS_WDATA;
		pe[pfe_pe_id].mem_access_addr = (void *)TMU_MEM_ACCESS_ADDR;
		pe[pfe_pe_id].mem_access_rdata = (void *)TMU_MEM_ACCESS_RDATA;
	}
}

/*
 * Writes a buffer to PE internal memory from the host
 * through indirect access registers.
 *
 * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
 *				..., UTIL_ID)
 * @param[in] mem_access_addr	PE memory destination address (must be 32bit
 *				aligned)
 * @param[in] src		Buffer source address
 * @param[in] len		Number of bytes to copy
 */
static void pe_mem_memcpy_to32(int id, u32 mem_access_addr, const void *src,
			       unsigned int len)
{
	u32 offset = 0, val, addr;
	unsigned int len32 = len >> 2;
	int i;

	addr = mem_access_addr | PE_MEM_ACCESS_WRITE |
		PE_MEM_ACCESS_BYTE_ENABLE(0, 4);

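	/*
	 * For each full 32-bit word, latch the byte-swapped data in the
	 * WDATA register, then write the target address to the access
	 * address register to trigger the indirect write.
	 */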
	for (i = 0; i < len32; i++, offset += 4, src += 4) {
		val = *(u32 *)src;
		writel(cpu_to_be32(val), pe[id].mem_access_wdata);
		writel(addr + offset, pe[id].mem_access_addr);
	}

	len = (len & 0x3);
	if (len) {
		val = 0;

		addr = (mem_access_addr | PE_MEM_ACCESS_WRITE |
			PE_MEM_ACCESS_BYTE_ENABLE(0, len)) + offset;

		for (i = 0; i < len; i++, src++)
			val |= (*(u8 *)src) << (8 * i);

		writel(cpu_to_be32(val), pe[id].mem_access_wdata);
		writel(addr, pe[id].mem_access_addr);
	}
}

/*
 * Writes a buffer to PE internal data memory (DMEM) from the host
 * through indirect access registers.
 * @param[in] id	PE identification (CLASS0_ID, ..., TMU0_ID,
 *			..., UTIL_ID)
 * @param[in] dst	DMEM destination address (must be 32bit
 *			aligned)
 * @param[in] src	Buffer source address
 * @param[in] len	Number of bytes to copy
 */
static void pe_dmem_memcpy_to32(int id, u32 dst, const void *src,
				unsigned int len)
{
	pe_mem_memcpy_to32(id, pe[id].dmem_base_addr | dst | PE_MEM_ACCESS_DMEM,
			   src, len);
}

/*
 * Writes a buffer to PE internal program memory (PMEM) from the host
 * through indirect access registers.
 * @param[in] id	PE identification (CLASS0_ID, ..., TMU0_ID,
 *			..., TMU3_ID)
 * @param[in] dst	PMEM destination address (must be 32bit
 *			aligned)
 * @param[in] src	Buffer source address
 * @param[in] len	Number of bytes to copy
 */
static void pe_pmem_memcpy_to32(int id, u32 dst, const void *src,
				unsigned int len)
{
	pe_mem_memcpy_to32(id, pe[id].pmem_base_addr | (dst & (pe[id].pmem_size
				- 1)) | PE_MEM_ACCESS_IMEM, src, len);
}

/*
 * Reads PE internal program memory (IMEM) from the host
 * through indirect access registers.
 * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
 *				..., TMU3_ID)
 * @param[in] addr		PMEM read address (must be aligned on size)
 * @param[in] size		Number of bytes to read (maximum 4, must not
 *				cross 32bit boundaries)
 * @return			the data read (in PE endianness, i.e. BE).
 */
u32 pe_pmem_read(int id, u32 addr, u8 size)
{
	u32 offset = addr & 0x3;
	u32 mask = 0xffffffff >> ((4 - size) << 3);
	u32 val;

	addr = pe[id].pmem_base_addr | ((addr & ~0x3) & (pe[id].pmem_size - 1))
		| PE_MEM_ACCESS_READ | PE_MEM_ACCESS_IMEM |
		PE_MEM_ACCESS_BYTE_ENABLE(offset, size);

	writel(addr, pe[id].mem_access_addr);
	val = be32_to_cpu(readl(pe[id].mem_access_rdata));

	return (val >> (offset << 3)) & mask;
}

/*
 * Writes PE internal data memory (DMEM) from the host
 * through indirect access registers.
 * @param[in] id	PE identification (CLASS0_ID, ..., TMU0_ID,
 *			..., UTIL_ID)
 * @param[in] val	Value to write (in PE endianness, i.e. BE)
 * @param[in] addr	DMEM write address (must be aligned on size)
 * @param[in] size	Number of bytes to write (maximum 4, must not
 *			cross 32bit boundaries)
 */
void pe_dmem_write(int id, u32 val, u32 addr, u8 size)
{
	u32 offset = addr & 0x3;

	addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_WRITE |
		PE_MEM_ACCESS_DMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);

	/* The indirect access interface byte-swaps the data being written */
	writel(cpu_to_be32(val << (offset << 3)), pe[id].mem_access_wdata);
	writel(addr, pe[id].mem_access_addr);
}

/*
 * Reads PE internal data memory (DMEM) from the host
 * through indirect access registers.
 * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
 *				..., UTIL_ID)
 * @param[in] addr		DMEM read address (must be aligned on size)
 * @param[in] size		Number of bytes to read (maximum 4, must not
 *				cross 32bit boundaries)
 * @return			the data read (in PE endianness, i.e. BE).
 */
u32 pe_dmem_read(int id, u32 addr, u8 size)
{
	u32 offset = addr & 0x3;
	u32 mask = 0xffffffff >> ((4 - size) << 3);
	u32 val;

	addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_READ |
		PE_MEM_ACCESS_DMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);

	writel(addr, pe[id].mem_access_addr);

	/* The indirect access interface byte-swaps the data being read */
	val = be32_to_cpu(readl(pe[id].mem_access_rdata));

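	/* Shift the addressed byte(s) down to bit 0 and mask to the requested size */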
	return (val >> (offset << 3)) & mask;
}
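
/*
 * Illustrative sketch (not part of the driver, hence guarded by "#if 0"):
 * host code typically uses pe_dmem_read()/pe_dmem_write() to exchange small
 * control words with a PE. The DMEM offset and the polling loop below are
 * made-up examples, not addresses defined by the PFE firmware interface.
 */
#if 0
static int pe_wait_for_ready(int id)
{
	int timeout = 100;

	do {
		/* hypothetical DMEM word where the PE would post its state */
		if (pe_dmem_read(id, 0x4, 4))
			return 0;
		udelay(1000);
	} while (--timeout);

	return -1;
}
#endif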

/*
 * Writes to CLASS internal bus peripherals (ccu, pe-lmem) from the host
 * through indirect access registers.
 * @param[in]	val	value to write
 * @param[in]	addr	Address to write to (must be aligned on size)
 * @param[in]	size	Number of bytes to write (1, 2 or 4)
 */
static void class_bus_write(u32 val, u32 addr, u8 size)
{
	u32 offset = addr & 0x3;

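	/*
	 * Select the bus-access window first; the remaining offset, the
	 * write flag and the access size are then encoded into the access
	 * address register, which performs the write.
	 */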
	writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);

	addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | PE_MEM_ACCESS_WRITE |
		(size << 24);

	writel(cpu_to_be32(val << (offset << 3)), CLASS_BUS_ACCESS_WDATA);
	writel(addr, CLASS_BUS_ACCESS_ADDR);
}

/*
 * Reads CLASS internal bus peripherals (ccu, pe-lmem) from the host
 * through indirect access registers.
 * @param[in] addr	Address to read from (must be aligned on size)
 * @param[in] size	Number of bytes to read (1, 2 or 4)
 * @return		the read data
 */
static u32 class_bus_read(u32 addr, u8 size)
{
	u32 offset = addr & 0x3;
	u32 mask = 0xffffffff >> ((4 - size) << 3);
	u32 val;

	writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);

	addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | (size << 24);

	writel(addr, CLASS_BUS_ACCESS_ADDR);
	val = be32_to_cpu(readl(CLASS_BUS_ACCESS_RDATA));

	return (val >> (offset << 3)) & mask;
}

/*
 * Writes data to the cluster memory (PE_LMEM)
 * @param[in] dst	PE LMEM destination address (must be 32bit aligned)
 * @param[in] src	Buffer source address
 * @param[in] len	Number of bytes to copy
 */
static void class_pe_lmem_memcpy_to32(u32 dst, const void *src,
				      unsigned int len)
{
	u32 len32 = len >> 2;
	int i;

	for (i = 0; i < len32; i++, src += 4, dst += 4)
		class_bus_write(*(u32 *)src, dst, 4);

	if (len & 0x2) {
		class_bus_write(*(u16 *)src, dst, 2);
		src += 2;
		dst += 2;
	}

	if (len & 0x1) {
		class_bus_write(*(u8 *)src, dst, 1);
		src++;
		dst++;
	}
}

/*
 * Fills the cluster memory (PE_LMEM) with a byte value
 * @param[in] dst	PE LMEM destination address (must be 32bit aligned)
 * @param[in] val	Value to write
 * @param[in] len	Number of bytes to write
 */
static void class_pe_lmem_memset(u32 dst, int val, unsigned int len)
{
	u32 len32 = len >> 2;
	int i;

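	/* Replicate the byte value into all four byte lanes of the 32-bit word */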
	val = val | (val << 8) | (val << 16) | (val << 24);

	for (i = 0; i < len32; i++, dst += 4)
		class_bus_write(val, dst, 4);

	if (len & 0x2) {
		class_bus_write(val, dst, 2);
		dst += 2;
	}

	if (len & 0x1) {
		class_bus_write(val, dst, 1);
		dst++;
	}
}

/*
 * Reads data from the cluster memory (PE_LMEM)
 * @param[out] dst	pointer to the destination buffer the data is copied to
 * @param[in] len	length in bytes of the amount of data to read
 *			from cluster memory
 * @param[in] offset	offset in bytes in the cluster memory where data are
 *			read from
 */
void pe_lmem_read(u32 *dst, u32 len, u32 offset)
{
	u32 len32 = len >> 2;
	int i = 0;

	for (i = 0; i < len32; dst++, i++, offset += 4)
		*dst = class_bus_read(PE_LMEM_BASE_ADDR + offset, 4);

	if (len & 0x03)
		*dst = class_bus_read(PE_LMEM_BASE_ADDR + offset, (len & 0x03));
}

/*
 * Writes data to the cluster memory (PE_LMEM)
 * @param[in] src	pointer to the source buffer the data is copied from
 * @param[in] len	length in bytes of the amount of data to write to the
 *			cluster memory
 * @param[in] offset	offset in bytes in the cluster memory where data are
 *			written to
 */
void pe_lmem_write(u32 *src, u32 len, u32 offset)
{
	u32 len32 = len >> 2;
	int i = 0;

	for (i = 0; i < len32; src++, i++, offset += 4)
		class_bus_write(*src, PE_LMEM_BASE_ADDR + offset, 4);

	if (len & 0x03)
		class_bus_write(*src, PE_LMEM_BASE_ADDR + offset, (len &
					0x03));
}


/*
 * Loads an elf section into pmem
 * Code needs to be at least 16bit aligned and only PROGBITS sections are
 * supported
 *
 * @param[in] id	PE identification (CLASS0_ID, ..., TMU0_ID, ...,
 *			TMU3_ID)
 * @param[in] data	pointer to the elf firmware
 * @param[in] shdr	pointer to the elf section header
 */
static int pe_load_pmem_section(int id, const void *data, Elf32_Shdr *shdr)
{
	u32 offset = be32_to_cpu(shdr->sh_offset);
	u32 addr = be32_to_cpu(shdr->sh_addr);
	u32 size = be32_to_cpu(shdr->sh_size);
	u32 type = be32_to_cpu(shdr->sh_type);

	if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
		printf(
			"%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
			__func__, addr, (unsigned long)data + offset);

		return -1;
	}

	if (addr & 0x1) {
		printf("%s: load address(%x) is not 16bit aligned\n",
		       __func__, addr);
		return -1;
	}

	if (size & 0x1) {
		printf("%s: load size(%x) is not 16bit aligned\n", __func__,
		       size);
		return -1;
	}

	debug("pmem pe%d @%x len %d\n", id, addr, size);
	switch (type) {
	case SHT_PROGBITS:
		pe_pmem_memcpy_to32(id, addr, data + offset, size);
		break;

	default:
		printf("%s: unsupported section type(%x)\n", __func__, type);
		return -1;
	}

	return 0;
}

/*
 * Loads an elf section into dmem
 * Data needs to be at least 32bit aligned; NOBITS sections are correctly
 * initialized to 0
 *
 * @param[in] id	PE identification (CLASS0_ID, ..., TMU0_ID,
 *			..., UTIL_ID)
 * @param[in] data	pointer to the elf firmware
 * @param[in] shdr	pointer to the elf section header
 */
static int pe_load_dmem_section(int id, const void *data, Elf32_Shdr *shdr)
{
	u32 offset = be32_to_cpu(shdr->sh_offset);
	u32 addr = be32_to_cpu(shdr->sh_addr);
	u32 size = be32_to_cpu(shdr->sh_size);
	u32 type = be32_to_cpu(shdr->sh_type);
	u32 size32 = size >> 2;
	int i;

	if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
		printf(
			"%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
			__func__, addr, (unsigned long)data + offset);

		return -1;
	}

	if (addr & 0x3) {
		printf("%s: load address(%x) is not 32bit aligned\n",
		       __func__, addr);
		return -1;
	}

	switch (type) {
	case SHT_PROGBITS:
		debug("dmem pe%d @%x len %d\n", id, addr, size);
		pe_dmem_memcpy_to32(id, addr, data + offset, size);
		break;

	case SHT_NOBITS:
		debug("dmem zero pe%d @%x len %d\n", id, addr, size);
		for (i = 0; i < size32; i++, addr += 4)
			pe_dmem_write(id, 0, addr, 4);

		if (size & 0x3)
			pe_dmem_write(id, 0, addr, size & 0x3);

		break;

	default:
		printf("%s: unsupported section type(%x)\n", __func__, type);
		return -1;
	}

	return 0;
}

/*
 * Loads an elf section into DDR
 * Data needs to be at least 32bit aligned; NOBITS sections are correctly
 * initialized to 0
 *
 * @param[in] id	PE identification (CLASS0_ID, ..., TMU0_ID,
 *			..., UTIL_ID)
 * @param[in] data	pointer to the elf firmware
 * @param[in] shdr	pointer to the elf section header
 */
static int pe_load_ddr_section(int id, const void *data, Elf32_Shdr *shdr)
{
	u32 offset = be32_to_cpu(shdr->sh_offset);
	u32 addr = be32_to_cpu(shdr->sh_addr);
	u32 size = be32_to_cpu(shdr->sh_size);
	u32 type = be32_to_cpu(shdr->sh_type);
	u32 flags = be32_to_cpu(shdr->sh_flags);

	switch (type) {
	case SHT_PROGBITS:
		debug("ddr  pe%d @%x len %d\n", id, addr, size);
		if (flags & SHF_EXECINSTR) {
			if (id <= CLASS_MAX_ID) {
				/* Do the loading only once in DDR */
				if (id == CLASS0_ID) {
					debug(
						"%s: load address(%x) and elf file address(%lx) rcvd\n"
						, __func__, addr,
						(unsigned long)data + offset);
					if (((unsigned long)(data + offset)
						& 0x3) != (addr & 0x3)) {
						printf(
							"%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
							__func__, addr,
							(unsigned long)data +
							offset);

						return -1;
					}

					if (addr & 0x1) {
						printf(
							"%s: load address(%x) is not 16bit aligned\n"
							, __func__, addr);
						return -1;
					}

					if (size & 0x1) {
						printf(
							"%s: load length(%x) is not 16bit aligned\n"
							, __func__, size);
						return -1;
					}

					memcpy((void *)DDR_PFE_TO_VIRT(addr),
					       data + offset, size);
				}
			} else {
				printf(
					"%s: unsupported ddr section type(%x) for PE(%d)\n"
					, __func__, type, id);
				return -1;
			}

		} else {
			memcpy((void *)DDR_PFE_TO_VIRT(addr), data + offset,
			       size);
		}

		break;

	case SHT_NOBITS:
		debug("ddr zero pe%d @%x len %d\n", id, addr, size);
		memset((void *)DDR_PFE_TO_VIRT(addr), 0, size);

		break;

	default:
		printf("%s: unsupported section type(%x)\n", __func__, type);
		return -1;
	}

	return 0;
}

/*
 * Loads an elf section into pe lmem
 * Data needs to be at least 32bit aligned; NOBITS sections are correctly
 * initialized to 0
 *
 * @param[in] id	PE identification (CLASS0_ID, ..., CLASS5_ID)
 * @param[in] data	pointer to the elf firmware
 * @param[in] shdr	pointer to the elf section header
 */
static int pe_load_pe_lmem_section(int id, const void *data, Elf32_Shdr *shdr)
{
	u32 offset = be32_to_cpu(shdr->sh_offset);
	u32 addr = be32_to_cpu(shdr->sh_addr);
	u32 size = be32_to_cpu(shdr->sh_size);
	u32 type = be32_to_cpu(shdr->sh_type);

	if (id > CLASS_MAX_ID) {
		printf("%s: unsupported pe-lmem section type(%x) for PE(%d)\n",
		       __func__, type, id);
		return -1;
	}

	if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
		printf(
			"%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
			__func__, addr, (unsigned long)data + offset);

		return -1;
	}

	if (addr & 0x3) {
		printf("%s: load address(%x) is not 32bit aligned\n",
		       __func__, addr);
		return -1;
	}

	debug("lmem  pe%d @%x len %d\n", id, addr, size);

	switch (type) {
	case SHT_PROGBITS:
		class_pe_lmem_memcpy_to32(addr, data + offset, size);
		break;

	case SHT_NOBITS:
		class_pe_lmem_memset(addr, 0, size);
		break;

	default:
		printf("%s: unsupported section type(%x)\n", __func__, type);
		return -1;
	}

	return 0;
}

/*
 * Loads an elf section into a PE
 * For now only supports loading a section to dmem (all PEs), pmem (class and
 * tmu PEs), DDR (util PE code)
 * @param[in] id	PE identification (CLASS0_ID, ..., TMU0_ID,
 *			..., UTIL_ID)
 * @param[in] data	pointer to the elf firmware
 * @param[in] shdr	pointer to the elf section header
 */
int pe_load_elf_section(int id, const void *data, Elf32_Shdr *shdr)
{
	u32 addr = be32_to_cpu(shdr->sh_addr);
	u32 size = be32_to_cpu(shdr->sh_size);

	if (IS_DMEM(addr, size))
		return pe_load_dmem_section(id, data, shdr);
	else if (IS_PMEM(addr, size))
		return pe_load_pmem_section(id, data, shdr);
	else if (IS_PFE_LMEM(addr, size))
		return 0;
	else if (IS_PHYS_DDR(addr, size))
		return pe_load_ddr_section(id, data, shdr);
	else if (IS_PE_LMEM(addr, size))
		return pe_load_pe_lmem_section(id, data, shdr);

	printf("%s: unsupported memory range(%x)\n", __func__, addr);

	return 0;
}
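
/*
 * Illustrative sketch of a caller (not part of this file, hence "#if 0"):
 * a firmware loader would typically walk the ELF section headers and feed
 * each allocated section to pe_load_elf_section(). The helper name and the
 * SHF_ALLOC filter below are assumptions for the example; the real loader
 * lives elsewhere (pfe_firmware.c).
 */
#if 0
static int pfe_load_elf_sketch(int id, void *fw_data)
{
	Elf32_Ehdr *ehdr = fw_data;
	Elf32_Shdr *shdr = fw_data + be32_to_cpu(ehdr->e_shoff);
	int i, sections = be16_to_cpu(ehdr->e_shnum);

	for (i = 0; i < sections; i++, shdr++) {
		/* only sections that occupy PE/DDR memory need loading */
		if (!(be32_to_cpu(shdr->sh_flags) & SHF_ALLOC))
			continue;

		if (pe_load_elf_section(id, fw_data, shdr))
			return -1;
	}

	return 0;
}
#endif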

/**************************** BMU ***************************/
/*
 * Resets a BMU block.
 * @param[in] base	BMU block base address
 */
static inline void bmu_reset(void *base)
{
	writel(CORE_SW_RESET, base + BMU_CTRL);

	/* Wait for self clear */
	while (readl(base + BMU_CTRL) & CORE_SW_RESET)
		;
}

/*
 * Enables a BMU block.
 * @param[in] base	BMU block base address
 */
void bmu_enable(void *base)
{
	writel(CORE_ENABLE, base + BMU_CTRL);
}

/*
 * Disables a BMU block.
 * @param[in] base	BMU block base address
 */
static inline void bmu_disable(void *base)
{
	writel(CORE_DISABLE, base + BMU_CTRL);
}

/*
 * Sets the configuration of a BMU block.
 * @param[in] base	BMU block base address
 * @param[in] cfg	BMU configuration
 */
static inline void bmu_set_config(void *base, struct bmu_cfg *cfg)
{
	writel(cfg->baseaddr, base + BMU_UCAST_BASE_ADDR);
	writel(cfg->count & 0xffff, base + BMU_UCAST_CONFIG);
	writel(cfg->size & 0xffff, base + BMU_BUF_SIZE);

	/* Interrupts are never used */
	writel(0x0, base + BMU_INT_ENABLE);
}

/*
 * Initializes a BMU block.
 * @param[in] base	BMU block base address
 * @param[in] cfg	BMU configuration
 */
void bmu_init(void *base, struct bmu_cfg *cfg)
{
	bmu_disable(base);

	bmu_set_config(base, cfg);

	bmu_reset(base);
}

/**************************** GPI ***************************/
/*
 * Resets a GPI block.
 * @param[in] base	GPI base address
 */
static inline void gpi_reset(void *base)
{
	writel(CORE_SW_RESET, base + GPI_CTRL);
}

/*
 * Enables a GPI block.
 * @param[in] base	GPI base address
 */
void gpi_enable(void *base)
{
	writel(CORE_ENABLE, base + GPI_CTRL);
}

/*
 * Disables a GPI block.
 * @param[in] base	GPI base address
 */
void gpi_disable(void *base)
{
	writel(CORE_DISABLE, base + GPI_CTRL);
}

/*
 * Sets the configuration of a GPI block.
 * @param[in] base	GPI base address
 * @param[in] cfg	GPI configuration
 */
static inline void gpi_set_config(void *base, struct gpi_cfg *cfg)
{
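	/*
	 * Point the GPI at the BMU1 (LMEM) and BMU2 (DDR) buffer
	 * allocation/free interfaces so it can manage rx buffers directly.
	 */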
	writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_ALLOC_CTRL), base
	       + GPI_LMEM_ALLOC_ADDR);
	writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_FREE_CTRL), base
	       + GPI_LMEM_FREE_ADDR);
	writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_ALLOC_CTRL), base
	       + GPI_DDR_ALLOC_ADDR);
	writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL), base
	       + GPI_DDR_FREE_ADDR);
	writel(CBUS_VIRT_TO_PFE(CLASS_INQ_PKTPTR), base + GPI_CLASS_ADDR);
	writel(DDR_HDR_SIZE, base + GPI_DDR_DATA_OFFSET);
	writel(LMEM_HDR_SIZE, base + GPI_LMEM_DATA_OFFSET);
	writel(0, base + GPI_LMEM_SEC_BUF_DATA_OFFSET);
	writel(0, base + GPI_DDR_SEC_BUF_DATA_OFFSET);
	writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, base + GPI_HDR_SIZE);
	writel((DDR_BUF_SIZE << 16) | LMEM_BUF_SIZE, base + GPI_BUF_SIZE);

	writel(((cfg->lmem_rtry_cnt << 16) | (GPI_DDR_BUF_EN << 1) |
		GPI_LMEM_BUF_EN), base + GPI_RX_CONFIG);
	writel(cfg->tmlf_txthres, base + GPI_TMLF_TX);
	writel(cfg->aseq_len, base + GPI_DTX_ASEQ);

	/* Make GPI AXI transactions non-bufferable */
	writel(0x1, base + GPI_AXI_CTRL);
}

/*
 * Initializes a GPI block.
 * @param[in] base	GPI base address
 * @param[in] cfg	GPI configuration
 */
void gpi_init(void *base, struct gpi_cfg *cfg)
{
	gpi_reset(base);

	gpi_disable(base);

	gpi_set_config(base, cfg);
}

/**************************** CLASSIFIER ***************************/
/*
 * Resets CLASSIFIER block.
 */
static inline void class_reset(void)
{
	writel(CORE_SW_RESET, CLASS_TX_CTRL);
}

/*
 * Enables all CLASS-PE cores.
 */
void class_enable(void)
{
	writel(CORE_ENABLE, CLASS_TX_CTRL);
}

/*
 * Disables all CLASS-PE cores.
 */
void class_disable(void)
{
	writel(CORE_DISABLE, CLASS_TX_CTRL);
}

/*
 * Sets the configuration of the CLASSIFIER block.
 * @param[in] cfg	CLASSIFIER configuration
 */
static inline void class_set_config(struct class_cfg *cfg)
{
	if (PLL_CLK_EN == 0) {
		/* Clock ratio: for 1:1 the value is 0 */
		writel(0x0, CLASS_PE_SYS_CLK_RATIO);
	} else {
		/* Clock ratio: for 1:2 the value is 1 */
		writel(0x1, CLASS_PE_SYS_CLK_RATIO);
	}
	writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, CLASS_HDR_SIZE);
	writel(LMEM_BUF_SIZE, CLASS_LMEM_BUF_SIZE);
	writel(CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) |
		CLASS_ROUTE_HASH_SIZE(cfg->route_table_hash_bits),
		CLASS_ROUTE_HASH_ENTRY_SIZE);
	writel(HASH_CRC_PORT_IP | QB2BUS_LE, CLASS_ROUTE_MULTI);

	writel(cfg->route_table_baseaddr, CLASS_ROUTE_TABLE_BASE);
	memset((void *)DDR_PFE_TO_VIRT(cfg->route_table_baseaddr), 0,
	       ROUTE_TABLE_SIZE);

	writel(CLASS_PE0_RO_DM_ADDR0_VAL, CLASS_PE0_RO_DM_ADDR0);
	writel(CLASS_PE0_RO_DM_ADDR1_VAL, CLASS_PE0_RO_DM_ADDR1);
	writel(CLASS_PE0_QB_DM_ADDR0_VAL, CLASS_PE0_QB_DM_ADDR0);
	writel(CLASS_PE0_QB_DM_ADDR1_VAL, CLASS_PE0_QB_DM_ADDR1);
	writel(CBUS_VIRT_TO_PFE(TMU_PHY_INQ_PKTPTR), CLASS_TM_INQ_ADDR);

	writel(23, CLASS_AFULL_THRES);
	writel(23, CLASS_TSQ_FIFO_THRES);

	writel(24, CLASS_MAX_BUF_CNT);
	writel(24, CLASS_TSQ_MAX_CNT);

	/* Make Class AXI transactions non-bufferable */
	writel(0x1, CLASS_AXI_CTRL);

	/*
	 * Make Util AXI transactions non-bufferable.
	 * The Util PE is not used in U-Boot, so do it from here.
	 */
	writel(0x1, UTIL_AXI_CTRL);
}

/*
 * Initializes CLASSIFIER block.
 * @param[in] cfg	CLASSIFIER configuration
 */
void class_init(struct class_cfg *cfg)
{
	class_reset();

	class_disable();

	class_set_config(cfg);
}

/**************************** TMU ***************************/
/*
 * Enables TMU-PE cores.
 * @param[in] pe_mask	TMU PE mask
 */
void tmu_enable(u32 pe_mask)
{
	writel(readl(TMU_TX_CTRL) | (pe_mask & 0xF), TMU_TX_CTRL);
}

/*
 * Disables TMU cores.
 * @param[in] pe_mask	TMU PE mask
 */
void tmu_disable(u32 pe_mask)
{
	writel(readl(TMU_TX_CTRL) & ~(pe_mask & 0xF), TMU_TX_CTRL);
}

/*
 * Initializes TMU block.
 * @param[in] cfg	TMU configuration
 */
void tmu_init(struct tmu_cfg *cfg)
{
	int q, phyno;

	/* keep in soft reset */
	writel(SW_RESET, TMU_CTRL);

	/* Make TMU AXI transactions non-bufferable */
	writel(0x1, TMU_AXI_CTRL);

	/* enable EMAC PHY ports */
	writel(0x3, TMU_SYS_GENERIC_CONTROL);

	writel(750, TMU_INQ_WATERMARK);

	writel(CBUS_VIRT_TO_PFE(EGPI1_BASE_ADDR + GPI_INQ_PKTPTR),
	       TMU_PHY0_INQ_ADDR);
	writel(CBUS_VIRT_TO_PFE(EGPI2_BASE_ADDR + GPI_INQ_PKTPTR),
	       TMU_PHY1_INQ_ADDR);

	writel(CBUS_VIRT_TO_PFE(HGPI_BASE_ADDR + GPI_INQ_PKTPTR),
	       TMU_PHY3_INQ_ADDR);
	writel(CBUS_VIRT_TO_PFE(HIF_NOCPY_RX_INQ0_PKTPTR), TMU_PHY4_INQ_ADDR);
	writel(CBUS_VIRT_TO_PFE(UTIL_INQ_PKTPTR), TMU_PHY5_INQ_ADDR);
	writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL),
	       TMU_BMU_INQ_ADDR);

	/* enabling all 10 schedulers [9:0] of each TDQ */
	writel(0x3FF, TMU_TDQ0_SCH_CTRL);
	writel(0x3FF, TMU_TDQ1_SCH_CTRL);
	writel(0x3FF, TMU_TDQ3_SCH_CTRL);

	if (PLL_CLK_EN == 0) {
		/* Clock ratio: for 1:1 the value is 0 */
		writel(0x0, TMU_PE_SYS_CLK_RATIO);
	} else {
		/* Clock ratio: for 1:2 the value is 1 */
		writel(0x1, TMU_PE_SYS_CLK_RATIO);
	}

	/* Extra packet pointers will be stored from this address onwards */
	debug("TMU_LLM_BASE_ADDR %x\n", cfg->llm_base_addr);
	writel(cfg->llm_base_addr, TMU_LLM_BASE_ADDR);

	debug("TMU_LLM_QUE_LEN %x\n", cfg->llm_queue_len);
	writel(cfg->llm_queue_len, TMU_LLM_QUE_LEN);

	writel(5, TMU_TDQ_IIFG_CFG);
	writel(DDR_BUF_SIZE, TMU_BMU_BUF_SIZE);

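	/* Release the soft reset before running the MEM/LLM init sequences */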
	writel(0x0, TMU_CTRL);

	/* MEM init */
	writel(MEM_INIT, TMU_CTRL);

	while (!(readl(TMU_CTRL) & MEM_INIT_DONE))
		;

	/* LLM init */
	writel(LLM_INIT, TMU_CTRL);

	while (!(readl(TMU_CTRL) & LLM_INIT_DONE))
		;

	/* set up each queue for tail drop */
	for (phyno = 0; phyno < 4; phyno++) {
		if (phyno == 2)
			continue;
		for (q = 0; q < 16; q++) {
			u32 qmax;

			writel((phyno << 8) | q, TMU_TEQ_CTRL);
			writel(BIT(22), TMU_TEQ_QCFG);

			if (phyno == 3)
				qmax = DEFAULT_TMU3_QDEPTH;
			else
				qmax = (q == 0) ? DEFAULT_Q0_QDEPTH :
					DEFAULT_MAX_QDEPTH;

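			/*
			 * The queue depth spans two registers: the low bits
			 * go into PROB_CFG2 (starting at bit 18), the
			 * remaining high bits into PROB_CFG3.
			 */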
			writel(qmax << 18, TMU_TEQ_HW_PROB_CFG2);
			writel(qmax >> 14, TMU_TEQ_HW_PROB_CFG3);
		}
	}
	writel(0x05, TMU_TEQ_DISABLE_DROPCHK);
	writel(0, TMU_CTRL);
}

/**************************** HIF ***************************/
/*
 * Enable hif tx DMA and interrupt
 */
void hif_tx_enable(void)
{
	writel(HIF_CTRL_DMA_EN, HIF_TX_CTRL);
}

/*
 * Disable hif tx DMA and interrupt
 */
void hif_tx_disable(void)
{
	u32 hif_int;

	writel(0, HIF_TX_CTRL);

	hif_int = readl(HIF_INT_ENABLE);
	hif_int &= ~HIF_TXPKT_INT_EN;
	writel(hif_int, HIF_INT_ENABLE);
}

/*
 * Enable hif rx DMA and interrupt
 */
void hif_rx_enable(void)
{
	writel((HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB), HIF_RX_CTRL);
}

/*
 * Disable hif rx DMA and interrupt
 */
void hif_rx_disable(void)
{
	u32 hif_int;

	writel(0, HIF_RX_CTRL);

	hif_int = readl(HIF_INT_ENABLE);
	hif_int &= ~HIF_RXPKT_INT_EN;
	writel(hif_int, HIF_INT_ENABLE);
}

/*
 * Initializes HIF copy block.
 */
void hif_init(void)
{
	/* Initialize HIF registers */
	writel(HIF_RX_POLL_CTRL_CYCLE << 16 | HIF_TX_POLL_CTRL_CYCLE,
	       HIF_POLL_CTRL);
	/* Make HIF AXI transactions non-bufferable */
	writel(0x1, HIF_AXI_CTRL);
}