// SPDX-License-Identifier: GPL-2.0
/*
 * hal.c - DIM2 HAL implementation
 * (MediaLB, Device Interface Macro IP, OS62420)
 *
 * Copyright (C) 2015-2016, Microchip Technology Germany II GmbH & Co. KG
 */

/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */

#include "hal.h"
#include "errors.h"
#include "reg.h"
#include <linux/stddef.h>
#include <linux/kernel.h>

/*
 * Size factor for the isochronous DBR buffer.
 * The minimum value is 3.
 */
#define ISOC_DBR_FACTOR 3u

/*
 * Number of 32-bit units for the DBR map.
 *
 * 1: block size is 512, max allocation is 16K
 * 2: block size is 256, max allocation is 8K
 * 4: block size is 128, max allocation is 4K
 * 8: block size is 64, max allocation is 2K
 *
 * The minimum allocation is one block; the maximum possible allocation
 * is 32 blocks.
 */
#define DBR_MAP_SIZE 2

/* -------------------------------------------------------------------------- */
/* non-configurable area */

#define CDT 0x00
#define ADT 0x40
#define MLB_CAT 0x80
#define AHB_CAT 0x88

#define DBR_SIZE  (16 * 1024) /* specified by IP */
#define DBR_BLOCK_SIZE  (DBR_SIZE / 32 / DBR_MAP_SIZE)
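
/*
 * With DBR_MAP_SIZE 2 this yields DBR_BLOCK_SIZE = 16K / 32 / 2 = 256 bytes,
 * i.e. a minimum allocation of 256 bytes and a maximum allocation of
 * 32 * 256 = 8K, matching the table above.
 */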

#define ROUND_UP_TO(x, d)  (DIV_ROUND_UP(x, (d)) * (d))

/* -------------------------------------------------------------------------- */
/* generic helper functions and macros */

static inline u32 bit_mask(u8 position)
{
	return (u32)1 << position;
}

static inline bool dim_on_error(u8 error_id, const char *error_message)
{
	dimcb_on_error(error_id, error_message);
	return false;
}

/* -------------------------------------------------------------------------- */
/* types and local variables */

struct async_tx_dbr {
	u8 ch_addr;
	u16 rpc;
	u16 wpc;
	u16 rest_size;
	u16 sz_queue[CDT0_RPC_MASK + 1];
};

struct lld_global_vars_t {
	bool dim_is_initialized;
	bool mcm_is_initialized;
	struct dim2_regs __iomem *dim2; /* DIM2 core base address */
	struct async_tx_dbr atx_dbr;
	u32 fcnt;
	u32 dbr_map[DBR_MAP_SIZE];
};

static struct lld_global_vars_t g = { false };

/* -------------------------------------------------------------------------- */

static int dbr_get_mask_size(u16 size)
{
	int i;

	for (i = 0; i < 6; i++)
		if (size <= (DBR_BLOCK_SIZE << i))
			return 1 << i;
	return 0;
}

/**
 * alloc_dbr() - Allocate DBR memory.
 * @size: Size of the memory to allocate.
 *
 * Returns the offset in DBR memory on success, or DBR_SIZE if out of memory.
 */
static int alloc_dbr(u16 size)
{
	int mask_size;
	int i, block_idx = 0;

	if (size <= 0)
		return DBR_SIZE; /* out of memory */

	mask_size = dbr_get_mask_size(size);
	if (mask_size == 0)
		return DBR_SIZE; /* out of memory */

	for (i = 0; i < DBR_MAP_SIZE; i++) {
		u32 const blocks = DIV_ROUND_UP(size, DBR_BLOCK_SIZE);
		u32 mask = ~((~(u32)0) << blocks);

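		/*
		 * Example: size 600 with DBR_BLOCK_SIZE 256 needs blocks = 3,
		 * so mask = 0b111; the window slides over the map in steps of
		 * mask_size = 4 blocks until three free blocks are found.
		 */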
		do {
			if ((g.dbr_map[i] & mask) == 0) {
				g.dbr_map[i] |= mask;
				return block_idx * DBR_BLOCK_SIZE;
			}
			block_idx += mask_size;
			/*
			 * Shift left in two steps in case mask_size == 32:
			 * a single 32-bit shift of a u32 is undefined in C.
			 */
			mask <<= mask_size - 1;
		} while ((mask <<= 1) != 0);
	}

	return DBR_SIZE; /* out of memory */
}

static void free_dbr(int offs, int size)
{
	int block_idx = offs / DBR_BLOCK_SIZE;
	u32 const blocks = DIV_ROUND_UP(size, DBR_BLOCK_SIZE);
	u32 mask = ~((~(u32)0) << blocks);

	mask <<= block_idx % 32;
	g.dbr_map[block_idx / 32] &= ~mask;
}

/* -------------------------------------------------------------------------- */

static void dim2_transfer_madr(u32 val)
{
	dimcb_io_write(&g.dim2->MADR, val);

	/* wait for transfer completion */
	while ((dimcb_io_read(&g.dim2->MCTL) & 1) != 1)
		continue;

	dimcb_io_write(&g.dim2->MCTL, 0);   /* clear transfer complete */
}

static void dim2_clear_dbr(u16 addr, u16 size)
{
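	/*
	 * MADR flags: WNR selects a write; TB targets the DBR rather than
	 * the CTR (CTR accesses elsewhere in this file leave it clear).
	 */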
	enum { MADR_TB_BIT = 30, MADR_WNR_BIT = 31 };

	u16 const end_addr = addr + size;
	u32 const cmd = bit_mask(MADR_WNR_BIT) | bit_mask(MADR_TB_BIT);

	dimcb_io_write(&g.dim2->MCTL, 0);   /* clear transfer complete */
	dimcb_io_write(&g.dim2->MDAT0, 0);

	for (; addr < end_addr; addr++)
		dim2_transfer_madr(cmd | addr);
}

static u32 dim2_read_ctr(u32 ctr_addr, u16 mdat_idx)
{
	dim2_transfer_madr(ctr_addr);

	return dimcb_io_read((&g.dim2->MDAT0) + mdat_idx);
}

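/*
 * Writes a 128-bit CTR entry: MDAT0..MDAT3 carry the data, MDWE0..MDWE3 are
 * per-bit write-enable masks, and writing MADR with WNR set starts the
 * transfer.
 */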
static void dim2_write_ctr_mask(u32 ctr_addr, const u32 *mask, const u32 *value)
{
	enum { MADR_WNR_BIT = 31 };

	dimcb_io_write(&g.dim2->MCTL, 0);   /* clear transfer complete */

	if (mask[0] != 0)
		dimcb_io_write(&g.dim2->MDAT0, value[0]);
	if (mask[1] != 0)
		dimcb_io_write(&g.dim2->MDAT1, value[1]);
	if (mask[2] != 0)
		dimcb_io_write(&g.dim2->MDAT2, value[2]);
	if (mask[3] != 0)
		dimcb_io_write(&g.dim2->MDAT3, value[3]);

	dimcb_io_write(&g.dim2->MDWE0, mask[0]);
	dimcb_io_write(&g.dim2->MDWE1, mask[1]);
	dimcb_io_write(&g.dim2->MDWE2, mask[2]);
	dimcb_io_write(&g.dim2->MDWE3, mask[3]);

	dim2_transfer_madr(bit_mask(MADR_WNR_BIT) | ctr_addr);
}

static inline void dim2_write_ctr(u32 ctr_addr, const u32 *value)
{
	u32 const mask[4] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };

	dim2_write_ctr_mask(ctr_addr, mask, value);
}

static inline void dim2_clear_ctr(u32 ctr_addr)
{
	u32 const value[4] = { 0, 0, 0, 0 };

	dim2_write_ctr(ctr_addr, value);
}

static void dim2_configure_cat(u8 cat_base, u8 ch_addr, u8 ch_type,
			       bool read_not_write)
{
	bool isoc_fce = ch_type == CAT_CT_VAL_ISOC;
	bool sync_mfe = ch_type == CAT_CT_VAL_SYNC;
	u16 const cat =
		(read_not_write << CAT_RNW_BIT) |
		(ch_type << CAT_CT_SHIFT) |
		(ch_addr << CAT_CL_SHIFT) |
		(isoc_fce << CAT_FCE_BIT) |
		(sync_mfe << CAT_MFE_BIT) |
		(false << CAT_MT_BIT) |
		(true << CAT_CE_BIT);
	u8 const ctr_addr = cat_base + ch_addr / 8;
	u8 const idx = (ch_addr % 8) / 2;
	u8 const shift = (ch_addr % 2) * 16;
	u32 mask[4] = { 0, 0, 0, 0 };
	u32 value[4] = { 0, 0, 0, 0 };

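	/*
	 * Each CTR entry packs eight 16-bit CAT words; e.g. ch_addr 10 lands
	 * in entry cat_base + 1, 32-bit word 1, lower half-word (shift 0).
	 */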
	mask[idx] = (u32)0xFFFF << shift;
	value[idx] = cat << shift;
	dim2_write_ctr_mask(ctr_addr, mask, value);
}

static void dim2_clear_cat(u8 cat_base, u8 ch_addr)
{
	u8 const ctr_addr = cat_base + ch_addr / 8;
	u8 const idx = (ch_addr % 8) / 2;
	u8 const shift = (ch_addr % 2) * 16;
	u32 mask[4] = { 0, 0, 0, 0 };
	u32 value[4] = { 0, 0, 0, 0 };

	mask[idx] = (u32)0xFFFF << shift;
	dim2_write_ctr_mask(ctr_addr, mask, value);
}

static void dim2_configure_cdt(u8 ch_addr, u16 dbr_address, u16 hw_buffer_size,
			       u16 packet_length)
{
	u32 cdt[4] = { 0, 0, 0, 0 };

	if (packet_length)
		cdt[1] = ((packet_length - 1) << CDT1_BS_ISOC_SHIFT);

	cdt[3] =
		((hw_buffer_size - 1) << CDT3_BD_SHIFT) |
		(dbr_address << CDT3_BA_SHIFT);
	dim2_write_ctr(CDT + ch_addr, cdt);
}

static u16 dim2_rpc(u8 ch_addr)
{
	u32 cdt0 = dim2_read_ctr(CDT + ch_addr, 0);

	return (cdt0 >> CDT0_RPC_SHIFT) & CDT0_RPC_MASK;
}

static void dim2_clear_cdt(u8 ch_addr)
{
	u32 cdt[4] = { 0, 0, 0, 0 };

	dim2_write_ctr(CDT + ch_addr, cdt);
}

static void dim2_configure_adt(u8 ch_addr)
{
	u32 adt[4] = { 0, 0, 0, 0 };

	adt[0] =
		(true << ADT0_CE_BIT) |
		(true << ADT0_LE_BIT) |
		(0 << ADT0_PG_BIT);

	dim2_write_ctr(ADT + ch_addr, adt);
}

static void dim2_clear_adt(u8 ch_addr)
{
	u32 adt[4] = { 0, 0, 0, 0 };

	dim2_write_ctr(ADT + ch_addr, adt);
}

static void dim2_start_ctrl_async(u8 ch_addr, u8 idx, u32 buf_addr,
				  u16 buffer_size)
{
	u8 const shift = idx * 16;

	u32 mask[4] = { 0, 0, 0, 0 };
	u32 adt[4] = { 0, 0, 0, 0 };

	mask[1] =
		bit_mask(ADT1_PS_BIT + shift) |
		bit_mask(ADT1_RDY_BIT + shift) |
		(ADT1_CTRL_ASYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
	adt[1] =
		(true << (ADT1_PS_BIT + shift)) |
		(true << (ADT1_RDY_BIT + shift)) |
		((buffer_size - 1) << (ADT1_BD_SHIFT + shift));

	mask[idx + 2] = 0xFFFFFFFF;
	adt[idx + 2] = buf_addr;

	dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
}

static void dim2_start_isoc_sync(u8 ch_addr, u8 idx, u32 buf_addr,
				 u16 buffer_size)
{
	u8 const shift = idx * 16;

	u32 mask[4] = { 0, 0, 0, 0 };
	u32 adt[4] = { 0, 0, 0, 0 };

	mask[1] =
		bit_mask(ADT1_RDY_BIT + shift) |
		(ADT1_ISOC_SYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
	adt[1] =
		(true << (ADT1_RDY_BIT + shift)) |
		((buffer_size - 1) << (ADT1_BD_SHIFT + shift));

	mask[idx + 2] = 0xFFFFFFFF;
	adt[idx + 2] = buf_addr;

	dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
}

static void dim2_clear_ctram(void)
{
	u32 ctr_addr;

	for (ctr_addr = 0; ctr_addr < 0x90; ctr_addr++)
		dim2_clear_ctr(ctr_addr);
}

static void dim2_configure_channel(
	u8 ch_addr, u8 type, u8 is_tx, u16 dbr_address, u16 hw_buffer_size,
	u16 packet_length)
{
	dim2_configure_cdt(ch_addr, dbr_address, hw_buffer_size, packet_length);
	dim2_configure_cat(MLB_CAT, ch_addr, type, is_tx ? 1 : 0);

	dim2_configure_adt(ch_addr);
	dim2_configure_cat(AHB_CAT, ch_addr, type, is_tx ? 0 : 1);

	/* unmask interrupt for used channel, enable mlb_sys_int[0] interrupt */
	dimcb_io_write(&g.dim2->ACMR0,
		       dimcb_io_read(&g.dim2->ACMR0) | bit_mask(ch_addr));
}

static void dim2_clear_channel(u8 ch_addr)
{
	/* mask interrupt for used channel, disable mlb_sys_int[0] interrupt */
	dimcb_io_write(&g.dim2->ACMR0,
		       dimcb_io_read(&g.dim2->ACMR0) & ~bit_mask(ch_addr));

	dim2_clear_cat(AHB_CAT, ch_addr);
	dim2_clear_adt(ch_addr);

	dim2_clear_cat(MLB_CAT, ch_addr);
	dim2_clear_cdt(ch_addr);

	/* clear channel status bit */
	dimcb_io_write(&g.dim2->ACSR0, bit_mask(ch_addr));
}

/* -------------------------------------------------------------------------- */
/* trace async tx dbr fill state */
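
/*
 * The hardware advances the read pointer counter (RPC) in CDT word 0 as it
 * drains the async TX DBR.  dbrcnt_enq() subtracts the size of each queued
 * buffer from rest_size, and dim_dbr_space() adds it back once the RPC has
 * passed it, so the free DBR space can be tracked in software.
 */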

static inline u16 norm_pc(u16 pc)
{
	return pc & CDT0_RPC_MASK;
}

static void dbrcnt_init(u8 ch_addr, u16 dbr_size)
{
	g.atx_dbr.rest_size = dbr_size;
	g.atx_dbr.rpc = dim2_rpc(ch_addr);
	g.atx_dbr.wpc = g.atx_dbr.rpc;
}

static void dbrcnt_enq(int buf_sz)
{
	g.atx_dbr.rest_size -= buf_sz;
	g.atx_dbr.sz_queue[norm_pc(g.atx_dbr.wpc)] = buf_sz;
	g.atx_dbr.wpc++;
}

u16 dim_dbr_space(struct dim_channel *ch)
{
	u16 cur_rpc;
	struct async_tx_dbr *dbr = &g.atx_dbr;

	if (ch->addr != dbr->ch_addr)
		return 0xFFFF;

	cur_rpc = dim2_rpc(ch->addr);

	while (norm_pc(dbr->rpc) != cur_rpc) {
		dbr->rest_size += dbr->sz_queue[norm_pc(dbr->rpc)];
		dbr->rpc++;
	}

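	/* at most CDT0_RPC_MASK buffers may be outstanding in sz_queue */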
	if ((u16)(dbr->wpc - dbr->rpc) >= CDT0_RPC_MASK)
		return 0;

	return dbr->rest_size;
}

/* -------------------------------------------------------------------------- */
/* channel state helpers */

static void state_init(struct int_ch_state *state)
{
	state->request_counter = 0;
	state->service_counter = 0;

	state->idx1 = 0;
	state->idx2 = 0;
	state->level = 0;
}

/* -------------------------------------------------------------------------- */
/* macro helper functions */

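/*
 * MediaLB channel addresses are even and non-zero; the logical channel
 * number programmed into the CAT (and used throughout this HAL) is
 * ch_address / 2.
 */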
static inline bool check_channel_address(u32 ch_address)
{
	return ch_address > 0 && (ch_address % 2) == 0 &&
	       (ch_address / 2) <= (u32)CAT_CL_MASK;
}

static inline bool check_packet_length(u32 packet_length)
{
	u16 const max_size = ((u16)CDT3_BD_ISOC_MASK + 1u) / ISOC_DBR_FACTOR;

	if (packet_length <= 0)
		return false; /* too small */

	if (packet_length > max_size)
		return false; /* too big */

	if (packet_length - 1u > (u32)CDT1_BS_ISOC_MASK)
		return false; /* too big */

	return true;
}

static inline bool check_bytes_per_frame(u32 bytes_per_frame)
{
	u16 const bd_factor = g.fcnt + 2;
	u16 const max_size = ((u16)CDT3_BD_MASK + 1u) >> bd_factor;

	if (bytes_per_frame <= 0)
		return false; /* too small */

	if (bytes_per_frame > max_size)
		return false; /* too big */

	return true;
}

static inline u16 norm_ctrl_async_buffer_size(u16 buf_size)
{
	u16 const max_size = (u16)ADT1_CTRL_ASYNC_BD_MASK + 1u;

	if (buf_size > max_size)
		return max_size;

	return buf_size;
}

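/*
 * Rounds the buffer size down to a whole number of packets (at least two),
 * e.g. packet_length 188 and buf_size 1024 give n = 5, i.e. 940 bytes.
 */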
static inline u16 norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
{
	u16 n;
	u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;

	if (buf_size > max_size)
		buf_size = max_size;

	n = buf_size / packet_length;

	if (n < 2u)
		return 0; /* too small buffer for given packet_length */

	return packet_length * n;
}

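/*
 * Rounds the buffer size down to a whole number of frame units, e.g. with
 * g.fcnt = 2 and bytes_per_frame = 4 the unit is 4 << 2 = 16 bytes, so a
 * buf_size of 100 is normalized to 96.
 */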
static inline u16 norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
{
	u16 n;
	u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
	u32 const unit = bytes_per_frame << g.fcnt;

	if (buf_size > max_size)
		buf_size = max_size;

	n = buf_size / unit;

	if (n < 1u)
		return 0; /* too small buffer for given bytes_per_frame */

	return unit * n;
}

static void dim2_cleanup(void)
{
	/* disable MediaLB */
	dimcb_io_write(&g.dim2->MLBC0, false << MLBC0_MLBEN_BIT);

	dim2_clear_ctram();

	/* disable mlb_int interrupt */
	dimcb_io_write(&g.dim2->MIEN, 0);

	/* clear status for all dma channels */
	dimcb_io_write(&g.dim2->ACSR0, 0xFFFFFFFF);
	dimcb_io_write(&g.dim2->ACSR1, 0xFFFFFFFF);

	/* mask interrupts for all channels */
	dimcb_io_write(&g.dim2->ACMR0, 0);
	dimcb_io_write(&g.dim2->ACMR1, 0);
}

static void dim2_initialize(bool enable_6pin, u8 mlb_clock)
{
	dim2_cleanup();

	/* configure and enable MediaLB */
	dimcb_io_write(&g.dim2->MLBC0,
		       enable_6pin << MLBC0_MLBPEN_BIT |
		       mlb_clock << MLBC0_MLBCLK_SHIFT |
		       g.fcnt << MLBC0_FCNT_SHIFT |
		       true << MLBC0_MLBEN_BIT);

	/* activate all HBI channels */
	dimcb_io_write(&g.dim2->HCMR0, 0xFFFFFFFF);
	dimcb_io_write(&g.dim2->HCMR1, 0xFFFFFFFF);

	/* enable HBI */
	dimcb_io_write(&g.dim2->HCTL, bit_mask(HCTL_EN_BIT));

	/* configure DMA */
	dimcb_io_write(&g.dim2->ACTL,
		       ACTL_DMA_MODE_VAL_DMA_MODE_1 << ACTL_DMA_MODE_BIT |
		       true << ACTL_SCE_BIT);
}

static bool dim2_is_mlb_locked(void)
{
	u32 const mask0 = bit_mask(MLBC0_MLBLK_BIT);
	u32 const mask1 = bit_mask(MLBC1_CLKMERR_BIT) |
			  bit_mask(MLBC1_LOCKERR_BIT);
	u32 const c1 = dimcb_io_read(&g.dim2->MLBC1);
	u32 const nda_mask = (u32)MLBC1_NDA_MASK << MLBC1_NDA_SHIFT;

	dimcb_io_write(&g.dim2->MLBC1, c1 & nda_mask);
	return (dimcb_io_read(&g.dim2->MLBC1) & mask1) == 0 &&
	       (dimcb_io_read(&g.dim2->MLBC0) & mask0) != 0;
}

/* -------------------------------------------------------------------------- */
/* channel help routines */

static inline bool service_channel(u8 ch_addr, u8 idx)
{
	u8 const shift = idx * 16;
	u32 const adt1 = dim2_read_ctr(ADT + ch_addr, 1);
	u32 mask[4] = { 0, 0, 0, 0 };
	u32 adt_w[4] = { 0, 0, 0, 0 };

	if (((adt1 >> (ADT1_DNE_BIT + shift)) & 1) == 0)
		return false;

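	/* acknowledge the descriptor half: clear its DNE, ERR and RDY bits */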
	mask[1] =
		bit_mask(ADT1_DNE_BIT + shift) |
		bit_mask(ADT1_ERR_BIT + shift) |
		bit_mask(ADT1_RDY_BIT + shift);
	dim2_write_ctr_mask(ADT + ch_addr, mask, adt_w);

	/* clear channel status bit */
	dimcb_io_write(&g.dim2->ACSR0, bit_mask(ch_addr));

	return true;
}

/* -------------------------------------------------------------------------- */
/* channel init routines */

static void isoc_init(struct dim_channel *ch, u8 ch_addr, u16 packet_length)
{
	state_init(&ch->state);

	ch->addr = ch_addr;

	ch->packet_length = packet_length;
	ch->bytes_per_frame = 0;
	ch->done_sw_buffers_number = 0;
}

static void sync_init(struct dim_channel *ch, u8 ch_addr, u16 bytes_per_frame)
{
	state_init(&ch->state);

	ch->addr = ch_addr;

	ch->packet_length = 0;
	ch->bytes_per_frame = bytes_per_frame;
	ch->done_sw_buffers_number = 0;
}

static void channel_init(struct dim_channel *ch, u8 ch_addr)
{
	state_init(&ch->state);

	ch->addr = ch_addr;

	ch->packet_length = 0;
	ch->bytes_per_frame = 0;
	ch->done_sw_buffers_number = 0;
}

/* returns true if channel interrupt state is cleared */
static bool channel_service_interrupt(struct dim_channel *ch)
{
	struct int_ch_state *const state = &ch->state;

	if (!service_channel(ch->addr, state->idx2))
		return false;

	state->idx2 ^= 1;
	state->request_counter++;
	return true;
}

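/*
 * Each ADT entry holds two buffer descriptors: idx1 selects the half to arm
 * next, idx2 the half to service next, and state->level counts the halves
 * currently in flight (at most two).
 */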
static bool channel_start(struct dim_channel *ch, u32 buf_addr, u16 buf_size)
{
	struct int_ch_state *const state = &ch->state;

	if (buf_size <= 0)
		return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE, "Bad buffer size");

	if (ch->packet_length == 0 && ch->bytes_per_frame == 0 &&
	    buf_size != norm_ctrl_async_buffer_size(buf_size))
		return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
				    "Bad control/async buffer size");

	if (ch->packet_length &&
	    buf_size != norm_isoc_buffer_size(buf_size, ch->packet_length))
		return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
				    "Bad isochronous buffer size");

	if (ch->bytes_per_frame &&
	    buf_size != norm_sync_buffer_size(buf_size, ch->bytes_per_frame))
		return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
				    "Bad synchronous buffer size");

	if (state->level >= 2u)
		return dim_on_error(DIM_ERR_OVERFLOW, "Channel overflow");

	++state->level;

	if (ch->addr == g.atx_dbr.ch_addr)
		dbrcnt_enq(buf_size);

	if (ch->packet_length || ch->bytes_per_frame)
		dim2_start_isoc_sync(ch->addr, state->idx1, buf_addr, buf_size);
	else
		dim2_start_ctrl_async(ch->addr, state->idx1, buf_addr,
				      buf_size);
	state->idx1 ^= 1;

	return true;
}

static u8 channel_service(struct dim_channel *ch)
{
	struct int_ch_state *const state = &ch->state;

	if (state->service_counter != state->request_counter) {
		state->service_counter++;
		if (state->level == 0)
			return DIM_ERR_UNDERFLOW;

		--state->level;
		ch->done_sw_buffers_number++;
	}

	return DIM_NO_ERROR;
}

static bool channel_detach_buffers(struct dim_channel *ch, u16 buffers_number)
{
	if (buffers_number > ch->done_sw_buffers_number)
		return dim_on_error(DIM_ERR_UNDERFLOW, "Channel underflow");

	ch->done_sw_buffers_number -= buffers_number;
	return true;
}

/* -------------------------------------------------------------------------- */
/* API */

u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock,
	       u32 fcnt)
{
	g.dim_is_initialized = false;

	if (!dim_base_address)
		return DIM_INIT_ERR_DIM_ADDR;

	/*
	 * MediaLB clock: 0 - 256 fs, 1 - 512 fs, 2 - 1024 fs, 3 - 2048 fs,
	 *                4 - 3072 fs, 5 - 4096 fs, 6 - 6144 fs, 7 - 8192 fs
	 */
	if (mlb_clock >= 8)
		return DIM_INIT_ERR_MLB_CLOCK;

	if (fcnt > MLBC0_FCNT_MAX_VAL)
		return DIM_INIT_ERR_MLB_CLOCK;

	g.dim2 = dim_base_address;
	g.fcnt = fcnt;
	g.dbr_map[0] = 0;
	g.dbr_map[1] = 0;

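	/* clocks of 2048 fs and above (mlb_clock >= 3) need the 6-pin interface */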
	dim2_initialize(mlb_clock >= 3, mlb_clock);

	g.dim_is_initialized = true;

	return DIM_NO_ERROR;
}

void dim_shutdown(void)
{
	g.dim_is_initialized = false;
	dim2_cleanup();
}

bool dim_get_lock_state(void)
{
	return dim2_is_mlb_locked();
}

static u8 init_ctrl_async(struct dim_channel *ch, u8 type, u8 is_tx,
			  u16 ch_address, u16 hw_buffer_size)
{
	if (!g.dim_is_initialized || !ch)
		return DIM_ERR_DRIVER_NOT_INITIALIZED;

	if (!check_channel_address(ch_address))
		return DIM_INIT_ERR_CHANNEL_ADDRESS;

	if (!ch->dbr_size)
		ch->dbr_size = ROUND_UP_TO(hw_buffer_size, DBR_BLOCK_SIZE);
	ch->dbr_addr = alloc_dbr(ch->dbr_size);
	if (ch->dbr_addr >= DBR_SIZE)
		return DIM_INIT_ERR_OUT_OF_MEMORY;

	channel_init(ch, ch_address / 2);

	dim2_configure_channel(ch->addr, type, is_tx,
			       ch->dbr_addr, ch->dbr_size, 0);

	return DIM_NO_ERROR;
}

void dim_service_mlb_int_irq(void)
{
	dimcb_io_write(&g.dim2->MS0, 0);
	dimcb_io_write(&g.dim2->MS1, 0);
}

u16 dim_norm_ctrl_async_buffer_size(u16 buf_size)
{
	return norm_ctrl_async_buffer_size(buf_size);
}

/**
 * dim_norm_isoc_buffer_size() - Normalize an isochronous buffer size.
 * @buf_size: Upper bound for the buffer size.
 * @packet_length: Isochronous packet length.
 *
 * Retrieves the maximal possible correct buffer size for the isochronous
 * data type that conforms to the given packet length and is not bigger than
 * the given buffer size.
 *
 * Returns a non-zero correct buffer size, or zero on error.
 */
u16 dim_norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
{
	if (!check_packet_length(packet_length))
		return 0;

	return norm_isoc_buffer_size(buf_size, packet_length);
}

/**
 * dim_norm_sync_buffer_size() - Normalize a synchronous buffer size.
 * @buf_size: Upper bound for the buffer size.
 * @bytes_per_frame: Number of bytes per frame.
 *
 * Retrieves the maximal possible correct buffer size for the synchronous
 * data type that conforms to the given bytes per frame and is not bigger
 * than the given buffer size.
 *
 * Returns a non-zero correct buffer size, or zero on error.
 */
u16 dim_norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
{
	if (!check_bytes_per_frame(bytes_per_frame))
		return 0;

	return norm_sync_buffer_size(buf_size, bytes_per_frame);
}

u8 dim_init_control(struct dim_channel *ch, u8 is_tx, u16 ch_address,
		    u16 max_buffer_size)
{
	return init_ctrl_async(ch, CAT_CT_VAL_CONTROL, is_tx, ch_address,
			       max_buffer_size);
}

u8 dim_init_async(struct dim_channel *ch, u8 is_tx, u16 ch_address,
		  u16 max_buffer_size)
{
	u8 ret = init_ctrl_async(ch, CAT_CT_VAL_ASYNC, is_tx, ch_address,
				 max_buffer_size);

	if (is_tx && !g.atx_dbr.ch_addr) {
		g.atx_dbr.ch_addr = ch->addr;
		dbrcnt_init(ch->addr, ch->dbr_size);
		dimcb_io_write(&g.dim2->MIEN, bit_mask(20));
	}

	return ret;
}

u8 dim_init_isoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
		 u16 packet_length)
{
	if (!g.dim_is_initialized || !ch)
		return DIM_ERR_DRIVER_NOT_INITIALIZED;

	if (!check_channel_address(ch_address))
		return DIM_INIT_ERR_CHANNEL_ADDRESS;

	if (!check_packet_length(packet_length))
		return DIM_ERR_BAD_CONFIG;

	if (!ch->dbr_size)
		ch->dbr_size = packet_length * ISOC_DBR_FACTOR;
	ch->dbr_addr = alloc_dbr(ch->dbr_size);
	if (ch->dbr_addr >= DBR_SIZE)
		return DIM_INIT_ERR_OUT_OF_MEMORY;

	isoc_init(ch, ch_address / 2, packet_length);

	dim2_configure_channel(ch->addr, CAT_CT_VAL_ISOC, is_tx, ch->dbr_addr,
			       ch->dbr_size, packet_length);

	return DIM_NO_ERROR;
}

u8 dim_init_sync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
		 u16 bytes_per_frame)
{
	u16 bd_factor = g.fcnt + 2;

	if (!g.dim_is_initialized || !ch)
		return DIM_ERR_DRIVER_NOT_INITIALIZED;

	if (!check_channel_address(ch_address))
		return DIM_INIT_ERR_CHANNEL_ADDRESS;

	if (!check_bytes_per_frame(bytes_per_frame))
		return DIM_ERR_BAD_CONFIG;

	if (!ch->dbr_size)
		ch->dbr_size = bytes_per_frame << bd_factor;
	ch->dbr_addr = alloc_dbr(ch->dbr_size);
	if (ch->dbr_addr >= DBR_SIZE)
		return DIM_INIT_ERR_OUT_OF_MEMORY;

	sync_init(ch, ch_address / 2, bytes_per_frame);

	dim2_clear_dbr(ch->dbr_addr, ch->dbr_size);
	dim2_configure_channel(ch->addr, CAT_CT_VAL_SYNC, is_tx,
			       ch->dbr_addr, ch->dbr_size, 0);

	return DIM_NO_ERROR;
}

u8 dim_destroy_channel(struct dim_channel *ch)
{
	if (!g.dim_is_initialized || !ch)
		return DIM_ERR_DRIVER_NOT_INITIALIZED;

	if (ch->addr == g.atx_dbr.ch_addr) {
		dimcb_io_write(&g.dim2->MIEN, 0);
		g.atx_dbr.ch_addr = 0;
	}

	dim2_clear_channel(ch->addr);
	if (ch->dbr_addr < DBR_SIZE)
		free_dbr(ch->dbr_addr, ch->dbr_size);
	ch->dbr_addr = DBR_SIZE;

	return DIM_NO_ERROR;
}

void dim_service_ahb_int_irq(struct dim_channel *const *channels)
{
	bool state_changed;

	if (!g.dim_is_initialized) {
		dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
			     "DIM is not initialized");
		return;
	}

	if (!channels) {
		dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED, "Bad channels");
		return;
	}

	/*
	 * Use a while-loop and a flag to make sure the age is changed back at
	 * least once; otherwise the interrupt may never come if the CPU
	 * generates an interrupt only on a change of the age.
	 * This loop runs no more than the number of channels, because
	 * channel_service_interrupt() doesn't start the channel again.
	 */
	do {
		struct dim_channel *const *ch = channels;

		state_changed = false;

		while (*ch) {
			state_changed |= channel_service_interrupt(*ch);
			++ch;
		}
	} while (state_changed);
}

u8 dim_service_channel(struct dim_channel *ch)
{
	if (!g.dim_is_initialized || !ch)
		return DIM_ERR_DRIVER_NOT_INITIALIZED;

	return channel_service(ch);
}

struct dim_ch_state_t *dim_get_channel_state(struct dim_channel *ch,
					     struct dim_ch_state_t *state_ptr)
{
	if (!ch || !state_ptr)
		return NULL;

	state_ptr->ready = ch->state.level < 2;
	state_ptr->done_buffers = ch->done_sw_buffers_number;

	return state_ptr;
}

bool dim_enqueue_buffer(struct dim_channel *ch, u32 buffer_addr,
			u16 buffer_size)
{
	if (!ch)
		return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
				    "Bad channel");

	return channel_start(ch, buffer_addr, buffer_size);
}

bool dim_detach_buffers(struct dim_channel *ch, u16 buffers_number)
{
	if (!ch)
		return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
				    "Bad channel");

	return channel_detach_buffers(ch, buffers_number);
}
983