// SPDX-License-Identifier: GPL-2.0
/*
 * QMC driver
 *
 * Copyright 2022 CS GROUP France
 *
 * Author: Herve Codina <herve.codina@bootlin.com>
 */

#include <soc/fsl/qe/qmc.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <soc/fsl/cpm.h>
#include <sysdev/fsl_soc.h>
#include "tsa.h"

/* SCC general mode register low (32 bits) */
#define SCC_GSMRL	0x00
#define SCC_GSMRL_ENR		(1 << 5)
#define SCC_GSMRL_ENT		(1 << 4)
#define SCC_GSMRL_MODE_QMC	(0x0A << 0)

/* SCC general mode register high (32 bits) */
#define SCC_GSMRH	0x04
#define SCC_GSMRH_CTSS		(1 << 7)
#define SCC_GSMRH_CDS		(1 << 8)
#define SCC_GSMRH_CTSP		(1 << 9)
#define SCC_GSMRH_CDP		(1 << 10)

/* SCC event register (16 bits) */
#define SCC_SCCE	0x10
#define SCC_SCCE_IQOV		(1 << 3)
#define SCC_SCCE_GINT		(1 << 2)
#define SCC_SCCE_GUN		(1 << 1)
#define SCC_SCCE_GOV		(1 << 0)

/* SCC mask register (16 bits) */
#define SCC_SCCM	0x14
/* Multichannel base pointer (32 bits) */
#define QMC_GBL_MCBASE		0x00
/* Multichannel controller state (16 bits) */
#define QMC_GBL_QMCSTATE	0x04
/* Maximum receive buffer length (16 bits) */
#define QMC_GBL_MRBLR		0x06
/* Tx time-slot assignment table pointer (16 bits) */
#define QMC_GBL_TX_S_PTR	0x08
/* Rx pointer (16 bits) */
#define QMC_GBL_RXPTR		0x0A
/* Global receive frame threshold (16 bits) */
#define QMC_GBL_GRFTHR		0x0C
/* Global receive frame count (16 bits) */
#define QMC_GBL_GRFCNT		0x0E
/* Multichannel interrupt base address (32 bits) */
#define QMC_GBL_INTBASE		0x10
/* Multichannel interrupt pointer (32 bits) */
#define QMC_GBL_INTPTR		0x14
/* Rx time-slot assignment table pointer (16 bits) */
#define QMC_GBL_RX_S_PTR	0x18
/* Tx pointer (16 bits) */
#define QMC_GBL_TXPTR		0x1A
/* CRC constant (32 bits) */
#define QMC_GBL_C_MASK32	0x1C
/* Time slot assignment table Rx (32 x 16 bits) */
#define QMC_GBL_TSATRX		0x20
/* Time slot assignment table Tx (32 x 16 bits) */
#define QMC_GBL_TSATTX		0x60
/* CRC constant (16 bits) */
#define QMC_GBL_C_MASK16	0xA0

/* TSA entry (16-bit entry in TSATRX and TSATTX) */
#define QMC_TSA_VALID		(1 << 15)
#define QMC_TSA_WRAP		(1 << 14)
#define QMC_TSA_MASK		(0x303F)
#define QMC_TSA_CHANNEL(x)	((x) << 6)
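
/*
 * For illustration (values derived from the defines above): a TSA entry
 * routing a time slot to channel 3 with the mask bits fully set would be
 *   QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(3) = 0xB0FF
 */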

/* Tx buffer descriptor base address (16 bits, offset from MCBASE) */
#define QMC_SPE_TBASE	0x00

/* Channel mode register (16 bits) */
#define QMC_SPE_CHAMR	0x02
#define QMC_SPE_CHAMR_MODE_HDLC		(1 << 15)
#define QMC_SPE_CHAMR_MODE_TRANSP	((0 << 15) | (1 << 13))
#define QMC_SPE_CHAMR_ENT		(1 << 12)
#define QMC_SPE_CHAMR_POL		(1 << 8)
#define QMC_SPE_CHAMR_HDLC_IDLM		(1 << 13)
#define QMC_SPE_CHAMR_HDLC_CRC		(1 << 7)
#define QMC_SPE_CHAMR_HDLC_NOF		(0x0f << 0)
#define QMC_SPE_CHAMR_TRANSP_RD		(1 << 14)
#define QMC_SPE_CHAMR_TRANSP_SYNC	(1 << 10)

/* Tx internal state (32 bits) */
#define QMC_SPE_TSTATE	0x04
/* Tx buffer descriptor pointer (16 bits) */
#define QMC_SPE_TBPTR	0x0C
/* Zero-insertion state (32 bits) */
#define QMC_SPE_ZISTATE	0x14
/* Channel's interrupt mask flags (16 bits) */
#define QMC_SPE_INTMSK	0x1C
/* Rx buffer descriptor base address (16 bits, offset from MCBASE) */
#define QMC_SPE_RBASE	0x20
/* HDLC: Maximum frame length register (16 bits) */
#define QMC_SPE_MFLR	0x22
/* TRANSPARENT: Transparent maximum receive length (16 bits) */
#define QMC_SPE_TMRBLR	0x22
/* Rx internal state (32 bits) */
#define QMC_SPE_RSTATE	0x24
/* Rx buffer descriptor pointer (16 bits) */
#define QMC_SPE_RBPTR	0x2C
/* Packs 4 bytes to 1 long word before writing to buffer (32 bits) */
#define QMC_SPE_RPACK	0x30
/* Zero deletion state (32 bits) */
#define QMC_SPE_ZDSTATE	0x34

/* Transparent synchronization (16 bits) */
#define QMC_SPE_TRNSYNC	0x3C
#define QMC_SPE_TRNSYNC_RX(x)	((x) << 8)
#define QMC_SPE_TRNSYNC_TX(x)	((x) << 0)

/* Interrupt related registers bits */
#define QMC_INT_V		(1 << 15)
#define QMC_INT_W		(1 << 14)
#define QMC_INT_NID		(1 << 13)
#define QMC_INT_IDL		(1 << 12)
#define QMC_INT_GET_CHANNEL(x)	(((x) & 0x0FC0) >> 6)
#define QMC_INT_MRF		(1 << 5)
#define QMC_INT_UN		(1 << 4)
#define QMC_INT_RXF		(1 << 3)
#define QMC_INT_BSY		(1 << 2)
#define QMC_INT_TXB		(1 << 1)
#define QMC_INT_RXB		(1 << 0)

/* BD related registers bits */
#define QMC_BD_RX_E	(1 << 15)
#define QMC_BD_RX_W	(1 << 13)
#define QMC_BD_RX_I	(1 << 12)
#define QMC_BD_RX_L	(1 << 11)
#define QMC_BD_RX_F	(1 << 10)
#define QMC_BD_RX_CM	(1 << 9)
#define QMC_BD_RX_UB	(1 << 7)
#define QMC_BD_RX_LG	(1 << 5)
#define QMC_BD_RX_NO	(1 << 4)
#define QMC_BD_RX_AB	(1 << 3)
#define QMC_BD_RX_CR	(1 << 2)

#define QMC_BD_TX_R	(1 << 15)
#define QMC_BD_TX_W	(1 << 13)
#define QMC_BD_TX_I	(1 << 12)
#define QMC_BD_TX_L	(1 << 11)
#define QMC_BD_TX_TC	(1 << 10)
#define QMC_BD_TX_CM	(1 << 9)
#define QMC_BD_TX_UB	(1 << 7)
#define QMC_BD_TX_PAD	(0x0f << 0)

/* Numbers of BDs and interrupt items */
#define QMC_NB_TXBDS	8
#define QMC_NB_RXBDS	8
#define QMC_NB_INTS	128

struct qmc_xfer_desc {
	union {
		void (*tx_complete)(void *context);
		void (*rx_complete)(void *context, size_t length);
	};
	void *context;
};

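/*
 * Per-channel state. Each direction uses a small ring of buffer descriptors
 * (BDs): the *bd_free pointer designates the next descriptor to submit and
 * *bd_done the next descriptor expected to complete, the two chasing each
 * other around the ring.
 */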
struct qmc_chan {
	struct list_head list;
	unsigned int id;
	struct qmc *qmc;
	void __iomem *s_param;
	enum qmc_mode mode;
	u64 tx_ts_mask;
	u64 rx_ts_mask;
	bool is_reverse_data;

	spinlock_t tx_lock;
	cbd_t __iomem *txbds;
	cbd_t __iomem *txbd_free;
	cbd_t __iomem *txbd_done;
	struct qmc_xfer_desc tx_desc[QMC_NB_TXBDS];
	u64 nb_tx_underrun;
	bool is_tx_stopped;

	spinlock_t rx_lock;
	cbd_t __iomem *rxbds;
	cbd_t __iomem *rxbd_free;
	cbd_t __iomem *rxbd_done;
	struct qmc_xfer_desc rx_desc[QMC_NB_RXBDS];
	u64 nb_rx_busy;
	int rx_pending;
	bool is_rx_halted;
	bool is_rx_stopped;
};

struct qmc {
	struct device *dev;
	struct tsa_serial *tsa_serial;
	void __iomem *scc_regs;
	void __iomem *scc_pram;
	void __iomem *dpram;
	u16 scc_pram_offset;
	cbd_t __iomem *bd_table;
	dma_addr_t bd_dma_addr;
	size_t bd_size;
	u16 __iomem *int_table;
	u16 __iomem *int_curr;
	dma_addr_t int_dma_addr;
	size_t int_size;
	struct list_head chan_head;
	struct qmc_chan *chans[64];
};

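/*
 * The SCC registers and the QMC parameter RAM are big-endian; all accesses
 * go through these thin wrappers around the big-endian MMIO accessors.
 */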
static inline void qmc_write16(void __iomem *addr, u16 val)
{
	iowrite16be(val, addr);
}

static inline u16 qmc_read16(void __iomem *addr)
{
	return ioread16be(addr);
}

static inline void qmc_setbits16(void __iomem *addr, u16 set)
{
	qmc_write16(addr, qmc_read16(addr) | set);
}

static inline void qmc_clrbits16(void __iomem *addr, u16 clr)
{
	qmc_write16(addr, qmc_read16(addr) & ~clr);
}

static inline void qmc_write32(void __iomem *addr, u32 val)
{
	iowrite32be(val, addr);
}

static inline u32 qmc_read32(void __iomem *addr)
{
	return ioread32be(addr);
}

static inline void qmc_setbits32(void __iomem *addr, u32 set)
{
	qmc_write32(addr, qmc_read32(addr) | set);
}
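
/*
 * 32-bit counterpart of qmc_clrbits16(); used by qmc_remove() to clear the
 * ENR/ENT bits (a set-bits call with a zero mask is a no-op
 * read-modify-write and cannot disable anything).
 */
static inline void qmc_clrbits32(void __iomem *addr, u32 clr)
{
	qmc_write32(addr, qmc_read32(addr) & ~clr);
}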

int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
{
	struct tsa_serial_info tsa_info;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &tsa_info);
	if (ret)
		return ret;

	info->mode = chan->mode;
	info->rx_fs_rate = tsa_info.rx_fs_rate;
	info->rx_bit_rate = tsa_info.rx_bit_rate;
	info->nb_tx_ts = hweight64(chan->tx_ts_mask);
	info->tx_fs_rate = tsa_info.tx_fs_rate;
	info->tx_bit_rate = tsa_info.tx_bit_rate;
	info->nb_rx_ts = hweight64(chan->rx_ts_mask);

	return 0;
}
EXPORT_SYMBOL(qmc_chan_get_info);

int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param)
{
	if (param->mode != chan->mode)
		return -EINVAL;

	switch (param->mode) {
	case QMC_HDLC:
		if ((param->hdlc.max_rx_buf_size % 4) ||
		    (param->hdlc.max_rx_buf_size < 8))
			return -EINVAL;

		qmc_write16(chan->qmc->scc_pram + QMC_GBL_MRBLR,
			    param->hdlc.max_rx_buf_size - 8);
		qmc_write16(chan->s_param + QMC_SPE_MFLR,
			    param->hdlc.max_rx_frame_size);
		if (param->hdlc.is_crc32) {
			qmc_setbits16(chan->s_param + QMC_SPE_CHAMR,
				      QMC_SPE_CHAMR_HDLC_CRC);
		} else {
			qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR,
				      QMC_SPE_CHAMR_HDLC_CRC);
		}
		break;

	case QMC_TRANSPARENT:
		qmc_write16(chan->s_param + QMC_SPE_TMRBLR,
			    param->transp.max_rx_buf_size);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(qmc_chan_set_param);

int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
			  void (*complete)(void *context), void *context)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;
	int ret;

	/*
	 * R bit  UB bit
	 *   0      0  : The BD is free
	 *   1      1  : The BD is in use, waiting for transfer
	 *   0      1  : The BD is in use, waiting for completion
	 *   1      0  : Should not happen
	 */

	spin_lock_irqsave(&chan->tx_lock, flags);
	bd = chan->txbd_free;

	ctrl = qmc_read16(&bd->cbd_sc);
	if (ctrl & (QMC_BD_TX_R | QMC_BD_TX_UB)) {
		/* We are full ... */
		ret = -EBUSY;
		goto end;
	}

	qmc_write16(&bd->cbd_datlen, length);
	qmc_write32(&bd->cbd_bufaddr, addr);

	xfer_desc = &chan->tx_desc[bd - chan->txbds];
	xfer_desc->tx_complete = complete;
	xfer_desc->context = context;

	/* Activate the descriptor */
	ctrl |= (QMC_BD_TX_R | QMC_BD_TX_UB);
	wmb(); /* Be sure to flush the descriptor before control update */
	qmc_write16(&bd->cbd_sc, ctrl);

	if (!chan->is_tx_stopped)
		qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);

	if (ctrl & QMC_BD_TX_W)
		chan->txbd_free = chan->txbds;
	else
		chan->txbd_free++;

	ret = 0;

end:
	spin_unlock_irqrestore(&chan->tx_lock, flags);
	return ret;
}
EXPORT_SYMBOL(qmc_chan_write_submit);
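
/*
 * Minimal caller-side Tx sketch (illustrative only; 'dev', 'buf', 'len' and
 * 'my_tx_complete' are hypothetical names). The dma_addr_t parameter means
 * the caller maps the buffer itself:
 *
 *	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (!dma_mapping_error(dev, addr))
 *		ret = qmc_chan_write_submit(chan, addr, len,
 *					    my_tx_complete, buf);
 */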

static void qmc_chan_write_done(struct qmc_chan *chan)
{
	struct qmc_xfer_desc *xfer_desc;
	void (*complete)(void *context);
	unsigned long flags;
	void *context;
	cbd_t __iomem *bd;
	u16 ctrl;

	/*
	 * R bit  UB bit
	 *   0      0  : The BD is free
	 *   1      1  : The BD is in use, waiting for transfer
	 *   0      1  : The BD is in use, waiting for completion
	 *   1      0  : Should not happen
	 */

	spin_lock_irqsave(&chan->tx_lock, flags);
	bd = chan->txbd_done;

	ctrl = qmc_read16(&bd->cbd_sc);
	while (!(ctrl & QMC_BD_TX_R)) {
		if (!(ctrl & QMC_BD_TX_UB))
			goto end;

		xfer_desc = &chan->tx_desc[bd - chan->txbds];
		complete = xfer_desc->tx_complete;
		context = xfer_desc->context;
		xfer_desc->tx_complete = NULL;
		xfer_desc->context = NULL;

		qmc_write16(&bd->cbd_sc, ctrl & ~QMC_BD_TX_UB);

		if (ctrl & QMC_BD_TX_W)
			chan->txbd_done = chan->txbds;
		else
			chan->txbd_done++;

		if (complete) {
			spin_unlock_irqrestore(&chan->tx_lock, flags);
			complete(context);
			spin_lock_irqsave(&chan->tx_lock, flags);
		}

		bd = chan->txbd_done;
		ctrl = qmc_read16(&bd->cbd_sc);
	}

end:
	spin_unlock_irqrestore(&chan->tx_lock, flags);
}

int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
			 void (*complete)(void *context, size_t length), void *context)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;
	int ret;

	/*
	 * E bit  UB bit
	 *   0      0  : The BD is free
	 *   1      1  : The BD is in use, waiting for transfer
	 *   0      1  : The BD is in use, waiting for completion
	 *   1      0  : Should not happen
	 */

	spin_lock_irqsave(&chan->rx_lock, flags);
	bd = chan->rxbd_free;

	ctrl = qmc_read16(&bd->cbd_sc);
	if (ctrl & (QMC_BD_RX_E | QMC_BD_RX_UB)) {
		/* We are full ... */
		ret = -EBUSY;
		goto end;
	}

	qmc_write16(&bd->cbd_datlen, 0); /* data length is updated by the QMC */
	qmc_write32(&bd->cbd_bufaddr, addr);

	xfer_desc = &chan->rx_desc[bd - chan->rxbds];
	xfer_desc->rx_complete = complete;
	xfer_desc->context = context;

	/* Activate the descriptor */
	ctrl |= (QMC_BD_RX_E | QMC_BD_RX_UB);
	wmb(); /* Be sure to flush data before descriptor activation */
	qmc_write16(&bd->cbd_sc, ctrl);

	/* Restart receiver if needed */
	if (chan->is_rx_halted && !chan->is_rx_stopped) {
		/* Restart receiver */
		if (chan->mode == QMC_TRANSPARENT)
			qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
		else
			qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
		qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
		chan->is_rx_halted = false;
	}
	chan->rx_pending++;

	if (ctrl & QMC_BD_RX_W)
		chan->rxbd_free = chan->rxbds;
	else
		chan->rxbd_free++;

	ret = 0;
end:
	spin_unlock_irqrestore(&chan->rx_lock, flags);
	return ret;
}
EXPORT_SYMBOL(qmc_chan_read_submit);
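
/*
 * Rx mirror of the Tx sketch above (again with hypothetical names): the QMC
 * fills in the received length, which is then passed to the completion
 * callback.
 *
 *	addr = dma_map_single(dev, buf, size, DMA_FROM_DEVICE);
 *	if (!dma_mapping_error(dev, addr))
 *		ret = qmc_chan_read_submit(chan, addr, size,
 *					   my_rx_complete, buf);
 */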

static void qmc_chan_read_done(struct qmc_chan *chan)
{
	void (*complete)(void *context, size_t size);
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	void *context;
	u16 datalen;
	u16 ctrl;

	/*
	 * E bit  UB bit
	 *   0      0  : The BD is free
	 *   1      1  : The BD is in use, waiting for transfer
	 *   0      1  : The BD is in use, waiting for completion
	 *   1      0  : Should not happen
	 */

	spin_lock_irqsave(&chan->rx_lock, flags);
	bd = chan->rxbd_done;

	ctrl = qmc_read16(&bd->cbd_sc);
	while (!(ctrl & QMC_BD_RX_E)) {
		if (!(ctrl & QMC_BD_RX_UB))
			goto end;

		xfer_desc = &chan->rx_desc[bd - chan->rxbds];
		complete = xfer_desc->rx_complete;
		context = xfer_desc->context;
		xfer_desc->rx_complete = NULL;
		xfer_desc->context = NULL;

		datalen = qmc_read16(&bd->cbd_datlen);
		qmc_write16(&bd->cbd_sc, ctrl & ~QMC_BD_RX_UB);

		if (ctrl & QMC_BD_RX_W)
			chan->rxbd_done = chan->rxbds;
		else
			chan->rxbd_done++;

		chan->rx_pending--;

		if (complete) {
			spin_unlock_irqrestore(&chan->rx_lock, flags);
			complete(context, datalen);
			spin_lock_irqsave(&chan->rx_lock, flags);
		}

		bd = chan->rxbd_done;
		ctrl = qmc_read16(&bd->cbd_sc);
	}

end:
	spin_unlock_irqrestore(&chan->rx_lock, flags);
}

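/*
 * Build and issue a CPM command for one QMC channel: chan->id << 2 selects
 * the channel in the command register's channel-number field, while
 * (opcode << 4) | 0x0E forms the QMC-specific opcode passed to
 * cpm_command() (field layout as used by this driver on CPM1).
 */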
static int qmc_chan_command(struct qmc_chan *chan, u8 qmc_opcode)
{
	return cpm_command(chan->id << 2, (qmc_opcode << 4) | 0x0E);
}

static int qmc_chan_stop_rx(struct qmc_chan *chan)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->rx_lock, flags);

	/* Send STOP RECEIVE command */
	ret = qmc_chan_command(chan, 0x0);
	if (ret) {
		dev_err(chan->qmc->dev, "chan %u: Send STOP RECEIVE failed (%d)\n",
			chan->id, ret);
		goto end;
	}

	chan->is_rx_stopped = true;

end:
	spin_unlock_irqrestore(&chan->rx_lock, flags);
	return ret;
}

static int qmc_chan_stop_tx(struct qmc_chan *chan)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->tx_lock, flags);

	/* Send STOP TRANSMIT command */
	ret = qmc_chan_command(chan, 0x1);
	if (ret) {
		dev_err(chan->qmc->dev, "chan %u: Send STOP TRANSMIT failed (%d)\n",
			chan->id, ret);
		goto end;
	}

	chan->is_tx_stopped = true;

end:
	spin_unlock_irqrestore(&chan->tx_lock, flags);
	return ret;
}

int qmc_chan_stop(struct qmc_chan *chan, int direction)
{
	int ret;

	if (direction & QMC_CHAN_READ) {
		ret = qmc_chan_stop_rx(chan);
		if (ret)
			return ret;
	}

	if (direction & QMC_CHAN_WRITE) {
		ret = qmc_chan_stop_tx(chan);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(qmc_chan_stop);

static void qmc_chan_start_rx(struct qmc_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->rx_lock, flags);

	/* Restart the receiver */
	if (chan->mode == QMC_TRANSPARENT)
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
	else
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
	qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
	chan->is_rx_halted = false;

	chan->is_rx_stopped = false;

	spin_unlock_irqrestore(&chan->rx_lock, flags);
}

static void qmc_chan_start_tx(struct qmc_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->tx_lock, flags);

	/*
	 * Enable the channel transmitter as it may have been disabled by
	 * qmc_chan_reset().
	 */
	qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);

	/* Set the POL bit in the channel mode register */
	qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);

	chan->is_tx_stopped = false;

	spin_unlock_irqrestore(&chan->tx_lock, flags);
}

int qmc_chan_start(struct qmc_chan *chan, int direction)
{
	if (direction & QMC_CHAN_READ)
		qmc_chan_start_rx(chan);

	if (direction & QMC_CHAN_WRITE)
		qmc_chan_start_tx(chan);

	return 0;
}
EXPORT_SYMBOL(qmc_chan_start);

static void qmc_chan_reset_rx(struct qmc_chan *chan)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;

	spin_lock_irqsave(&chan->rx_lock, flags);
	bd = chan->rxbds;
	do {
		ctrl = qmc_read16(&bd->cbd_sc);
		qmc_write16(&bd->cbd_sc, ctrl & ~(QMC_BD_RX_UB | QMC_BD_RX_E));

		xfer_desc = &chan->rx_desc[bd - chan->rxbds];
		xfer_desc->rx_complete = NULL;
		xfer_desc->context = NULL;

		bd++;
	} while (!(ctrl & QMC_BD_RX_W));

	chan->rxbd_free = chan->rxbds;
	chan->rxbd_done = chan->rxbds;
	qmc_write16(chan->s_param + QMC_SPE_RBPTR,
		    qmc_read16(chan->s_param + QMC_SPE_RBASE));

	chan->rx_pending = 0;

	spin_unlock_irqrestore(&chan->rx_lock, flags);
}

static void qmc_chan_reset_tx(struct qmc_chan *chan)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;

	spin_lock_irqsave(&chan->tx_lock, flags);

	/* Disable the transmitter. It will be re-enabled by qmc_chan_start() */
	qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);

	bd = chan->txbds;
	do {
		ctrl = qmc_read16(&bd->cbd_sc);
		qmc_write16(&bd->cbd_sc, ctrl & ~(QMC_BD_TX_UB | QMC_BD_TX_R));

		xfer_desc = &chan->tx_desc[bd - chan->txbds];
		xfer_desc->tx_complete = NULL;
		xfer_desc->context = NULL;

		bd++;
	} while (!(ctrl & QMC_BD_TX_W));

	chan->txbd_free = chan->txbds;
	chan->txbd_done = chan->txbds;
	qmc_write16(chan->s_param + QMC_SPE_TBPTR,
		    qmc_read16(chan->s_param + QMC_SPE_TBASE));

	/* Reset TSTATE and ZISTATE to their initial value */
	qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
	qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);

	spin_unlock_irqrestore(&chan->tx_lock, flags);
}

int qmc_chan_reset(struct qmc_chan *chan, int direction)
{
	if (direction & QMC_CHAN_READ)
		qmc_chan_reset_rx(chan);

	if (direction & QMC_CHAN_WRITE)
		qmc_chan_reset_tx(chan);

	return 0;
}
EXPORT_SYMBOL(qmc_chan_reset);

static int qmc_check_chans(struct qmc *qmc)
{
	struct tsa_serial_info info;
	bool is_one_table = false;
	struct qmc_chan *chan;
	u64 tx_ts_mask = 0;
	u64 rx_ts_mask = 0;
	u64 tx_ts_assigned_mask;
	u64 rx_ts_assigned_mask;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(qmc->tsa_serial, &info);
	if (ret)
		return ret;

	if ((info.nb_tx_ts > 64) || (info.nb_rx_ts > 64)) {
		dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned not supported\n");
		return -EINVAL;
	}

	/*
	 * If more than 32 TS are assigned to this serial, one common table is
	 * used for Tx and Rx and so masks must be equal for all channels.
	 */
	if ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) {
		if (info.nb_tx_ts != info.nb_rx_ts) {
			dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned are not equal\n");
			return -EINVAL;
		}
		is_one_table = true;
	}

	tx_ts_assigned_mask = info.nb_tx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_tx_ts) - 1;
	rx_ts_assigned_mask = info.nb_rx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_rx_ts) - 1;

	list_for_each_entry(chan, &qmc->chan_head, list) {
		if (chan->tx_ts_mask > tx_ts_assigned_mask) {
			dev_err(qmc->dev, "chan %u uses TSA unassigned Tx TS\n", chan->id);
			return -EINVAL;
		}
		if (tx_ts_mask & chan->tx_ts_mask) {
			dev_err(qmc->dev, "chan %u uses an already used Tx TS\n", chan->id);
			return -EINVAL;
		}

		if (chan->rx_ts_mask > rx_ts_assigned_mask) {
			dev_err(qmc->dev, "chan %u uses TSA unassigned Rx TS\n", chan->id);
			return -EINVAL;
		}
		if (rx_ts_mask & chan->rx_ts_mask) {
			dev_err(qmc->dev, "chan %u uses an already used Rx TS\n", chan->id);
			return -EINVAL;
		}

		if (is_one_table && (chan->tx_ts_mask != chan->rx_ts_mask)) {
			dev_err(qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id);
			return -EINVAL;
		}

		tx_ts_mask |= chan->tx_ts_mask;
		rx_ts_mask |= chan->rx_ts_mask;
	}

	return 0;
}

static unsigned int qmc_nb_chans(struct qmc *qmc)
{
	unsigned int count = 0;
	struct qmc_chan *chan;

	list_for_each_entry(chan, &qmc->chan_head, list)
		count++;

	return count;
}

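/*
 * Channels are described by child nodes of the controller node. An
 * illustrative fragment (values are an example, not taken from a specific
 * board; the 64-bit masks may also be written with /bits/ 64):
 *
 *	channel@16 {
 *		reg = <16>;
 *		fsl,operational-mode = "hdlc";
 *		fsl,tx-ts-mask = <0x00000000 0x0000000f>;
 *		fsl,rx-ts-mask = <0x00000000 0x0000000f>;
 *	};
 */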
static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
{
	struct device_node *chan_np;
	struct qmc_chan *chan;
	const char *mode;
	u32 chan_id;
	u64 ts_mask;
	int ret;

	for_each_available_child_of_node(np, chan_np) {
		ret = of_property_read_u32(chan_np, "reg", &chan_id);
		if (ret) {
			dev_err(qmc->dev, "%pOF: failed to read reg\n", chan_np);
			of_node_put(chan_np);
			return ret;
		}
		if (chan_id > 63) {
			dev_err(qmc->dev, "%pOF: Invalid chan_id\n", chan_np);
			of_node_put(chan_np);
			return -EINVAL;
		}

		chan = devm_kzalloc(qmc->dev, sizeof(*chan), GFP_KERNEL);
		if (!chan) {
			of_node_put(chan_np);
			return -ENOMEM;
		}

		chan->id = chan_id;
		spin_lock_init(&chan->rx_lock);
		spin_lock_init(&chan->tx_lock);

		ret = of_property_read_u64(chan_np, "fsl,tx-ts-mask", &ts_mask);
		if (ret) {
			dev_err(qmc->dev, "%pOF: failed to read fsl,tx-ts-mask\n",
				chan_np);
			of_node_put(chan_np);
			return ret;
		}
		chan->tx_ts_mask = ts_mask;

		ret = of_property_read_u64(chan_np, "fsl,rx-ts-mask", &ts_mask);
		if (ret) {
			dev_err(qmc->dev, "%pOF: failed to read fsl,rx-ts-mask\n",
				chan_np);
			of_node_put(chan_np);
			return ret;
		}
		chan->rx_ts_mask = ts_mask;

		mode = "transparent";
		ret = of_property_read_string(chan_np, "fsl,operational-mode", &mode);
		if (ret && ret != -EINVAL) {
			dev_err(qmc->dev, "%pOF: failed to read fsl,operational-mode\n",
				chan_np);
			of_node_put(chan_np);
			return ret;
		}
		if (!strcmp(mode, "transparent")) {
			chan->mode = QMC_TRANSPARENT;
		} else if (!strcmp(mode, "hdlc")) {
			chan->mode = QMC_HDLC;
		} else {
			dev_err(qmc->dev, "%pOF: Invalid fsl,operational-mode (%s)\n",
				chan_np, mode);
			of_node_put(chan_np);
			return -EINVAL;
		}

		chan->is_reverse_data = of_property_read_bool(chan_np,
							      "fsl,reverse-data");

		list_add_tail(&chan->list, &qmc->chan_head);
		qmc->chans[chan->id] = chan;
	}

	return qmc_check_chans(qmc);
}

static int qmc_setup_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *info)
{
	struct qmc_chan *chan;
	unsigned int i;
	u16 val;

	/*
	 * Use a common Tx/Rx 64-entry table.
	 * Everything was previously checked: the Tx and Rx assignments are
	 * identical, so build the table from the Rx data.
	 */

	/* Invalidate all entries */
	for (i = 0; i < 64; i++)
		qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);

	/* Set entries based on Rx data */
	list_for_each_entry(chan, &qmc->chan_head, list) {
		for (i = 0; i < info->nb_rx_ts; i++) {
			if (!(chan->rx_ts_mask & (((u64)1) << i)))
				continue;

			val = QMC_TSA_VALID | QMC_TSA_MASK |
			      QMC_TSA_CHANNEL(chan->id);
			qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), val);
		}
	}

	/* Set Wrap bit on last entry */
	qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
		      QMC_TSA_WRAP);

	/* Init pointers to the table */
	val = qmc->scc_pram_offset + QMC_GBL_TSATRX;
	qmc_write16(qmc->scc_pram + QMC_GBL_RX_S_PTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_TX_S_PTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);

	return 0;
}

static int qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info *info)
{
	struct qmc_chan *chan;
	unsigned int i;
	u16 val;

	/*
	 * Use one 32-entry table for Tx and one for Rx.
	 * Everything was previously checked.
	 */

	/* Invalidate all entries */
	for (i = 0; i < 32; i++) {
		qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);
		qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), 0x0000);
	}

	/* Set entries based on Rx and Tx data */
	list_for_each_entry(chan, &qmc->chan_head, list) {
		/* Rx part */
		for (i = 0; i < info->nb_rx_ts; i++) {
			if (!(chan->rx_ts_mask & (((u64)1) << i)))
				continue;

			val = QMC_TSA_VALID | QMC_TSA_MASK |
			      QMC_TSA_CHANNEL(chan->id);
			qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), val);
		}
		/* Tx part */
		for (i = 0; i < info->nb_tx_ts; i++) {
			if (!(chan->tx_ts_mask & (((u64)1) << i)))
				continue;

			val = QMC_TSA_VALID | QMC_TSA_MASK |
			      QMC_TSA_CHANNEL(chan->id);
			qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), val);
		}
	}

	/* Set Wrap bit on last entries */
	qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
		      QMC_TSA_WRAP);
	qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATTX + ((info->nb_tx_ts - 1) * 2),
		      QMC_TSA_WRAP);

	/* Init Rx pointers ... */
	val = qmc->scc_pram_offset + QMC_GBL_TSATRX;
	qmc_write16(qmc->scc_pram + QMC_GBL_RX_S_PTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);

	/* ... and Tx pointers */
	val = qmc->scc_pram_offset + QMC_GBL_TSATTX;
	qmc_write16(qmc->scc_pram + QMC_GBL_TX_S_PTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);

	return 0;
}

static int qmc_setup_tsa(struct qmc *qmc)
{
	struct tsa_serial_info info;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(qmc->tsa_serial, &info);
	if (ret)
		return ret;

	/*
	 * Setup one common 64-entry table or two 32-entry tables (one for Tx
	 * and one for Rx) according to the number of assigned TS.
	 */
	return ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) ?
		qmc_setup_tsa_64rxtx(qmc, &info) :
		qmc_setup_tsa_32rx_32tx(qmc, &info);
}

static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
{
	struct tsa_serial_info info;
	u16 first_rx, last_tx;
	u16 trnsync;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
	if (ret)
		return ret;

	/* Find the first Rx TS allocated to the channel */
	first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0;

	/* Find the last Tx TS allocated to the channel */
	last_tx = fls64(chan->tx_ts_mask);

	trnsync = 0;
	if (info.nb_rx_ts)
		trnsync |= QMC_SPE_TRNSYNC_RX((first_rx % info.nb_rx_ts) * 2);
	if (info.nb_tx_ts)
		trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2);

	qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync);

	dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n",
		chan->id, trnsync,
		first_rx, info.nb_rx_ts, chan->rx_ts_mask,
		last_tx, info.nb_tx_ts, chan->tx_ts_mask);

	return 0;
}

static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
{
	unsigned int i;
	cbd_t __iomem *bd;
	int ret;
	u16 val;

	chan->qmc = qmc;

	/* Set channel specific parameter base address */
	chan->s_param = qmc->dpram + (chan->id * 64);
	/* 16 BDs per channel (8 Rx and 8 Tx) */
	chan->txbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS));
	chan->rxbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS;

	chan->txbd_free = chan->txbds;
	chan->txbd_done = chan->txbds;
	chan->rxbd_free = chan->rxbds;
	chan->rxbd_done = chan->rxbds;

	/* TBASE and TBPTR */
	val = chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS) * sizeof(cbd_t);
	qmc_write16(chan->s_param + QMC_SPE_TBASE, val);
	qmc_write16(chan->s_param + QMC_SPE_TBPTR, val);

	/* RBASE and RBPTR */
	val = ((chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS) * sizeof(cbd_t);
	qmc_write16(chan->s_param + QMC_SPE_RBASE, val);
	qmc_write16(chan->s_param + QMC_SPE_RBPTR, val);
	qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
	qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
	qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);
	if (chan->mode == QMC_TRANSPARENT) {
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
		qmc_write16(chan->s_param + QMC_SPE_TMRBLR, 60);
		val = QMC_SPE_CHAMR_MODE_TRANSP | QMC_SPE_CHAMR_TRANSP_SYNC;
		if (chan->is_reverse_data)
			val |= QMC_SPE_CHAMR_TRANSP_RD;
		qmc_write16(chan->s_param + QMC_SPE_CHAMR, val);
		ret = qmc_setup_chan_trnsync(qmc, chan);
		if (ret)
			return ret;
	} else {
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
		qmc_write16(chan->s_param + QMC_SPE_MFLR, 60);
		qmc_write16(chan->s_param + QMC_SPE_CHAMR,
			    QMC_SPE_CHAMR_MODE_HDLC | QMC_SPE_CHAMR_HDLC_IDLM);
	}

	/* Do not enable interrupts now. They will be enabled later */
	qmc_write16(chan->s_param + QMC_SPE_INTMSK, 0x0000);

	/* Init Rx BDs and set Wrap bit on last descriptor */
	BUILD_BUG_ON(QMC_NB_RXBDS == 0);
	val = QMC_BD_RX_I;
	for (i = 0; i < QMC_NB_RXBDS; i++) {
		bd = chan->rxbds + i;
		qmc_write16(&bd->cbd_sc, val);
	}
	bd = chan->rxbds + QMC_NB_RXBDS - 1;
	qmc_write16(&bd->cbd_sc, val | QMC_BD_RX_W);

	/* Init Tx BDs and set Wrap bit on last descriptor */
	BUILD_BUG_ON(QMC_NB_TXBDS == 0);
	val = QMC_BD_TX_I;
	if (chan->mode == QMC_HDLC)
		val |= QMC_BD_TX_L | QMC_BD_TX_TC;
	for (i = 0; i < QMC_NB_TXBDS; i++) {
		bd = chan->txbds + i;
		qmc_write16(&bd->cbd_sc, val);
	}
	bd = chan->txbds + QMC_NB_TXBDS - 1;
	qmc_write16(&bd->cbd_sc, val | QMC_BD_TX_W);

	return 0;
}

static int qmc_setup_chans(struct qmc *qmc)
{
	struct qmc_chan *chan;
	int ret;

	list_for_each_entry(chan, &qmc->chan_head, list) {
		ret = qmc_setup_chan(qmc, chan);
		if (ret)
			return ret;
	}

	return 0;
}

static int qmc_finalize_chans(struct qmc *qmc)
{
	struct qmc_chan *chan;
	int ret;

	list_for_each_entry(chan, &qmc->chan_head, list) {
		/* Unmask channel interrupts */
		if (chan->mode == QMC_HDLC) {
			qmc_write16(chan->s_param + QMC_SPE_INTMSK,
				    QMC_INT_NID | QMC_INT_IDL | QMC_INT_MRF |
				    QMC_INT_UN | QMC_INT_RXF | QMC_INT_BSY |
				    QMC_INT_TXB | QMC_INT_RXB);
		} else {
			qmc_write16(chan->s_param + QMC_SPE_INTMSK,
				    QMC_INT_UN | QMC_INT_BSY |
				    QMC_INT_TXB | QMC_INT_RXB);
		}

		/* Force the channel to stop */
		ret = qmc_chan_stop(chan, QMC_CHAN_ALL);
		if (ret)
			return ret;
	}

	return 0;
}

static int qmc_setup_ints(struct qmc *qmc)
{
	unsigned int i;
	u16 __iomem *last;

	/* Zero all entries */
	for (i = 0; i < (qmc->int_size / sizeof(u16)); i++)
		qmc_write16(qmc->int_table + i, 0x0000);

	/* Set Wrap bit on last entry */
	if (qmc->int_size >= sizeof(u16)) {
		last = qmc->int_table + (qmc->int_size / sizeof(u16)) - 1;
		qmc_write16(last, QMC_INT_W);
	}

	return 0;
}

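/*
 * The QMC posts one 16-bit entry per event in the circular interrupt table.
 * Entries are consumed from int_curr while their Valid bit is set; the Wrap
 * bit marks the last entry and sends the walk back to the table base.
 */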
static void qmc_irq_gint(struct qmc *qmc)
{
	struct qmc_chan *chan;
	unsigned int chan_id;
	unsigned long flags;
	u16 int_entry;

	int_entry = qmc_read16(qmc->int_curr);
	while (int_entry & QMC_INT_V) {
		/* Clear all but the Wrap bit */
		qmc_write16(qmc->int_curr, int_entry & QMC_INT_W);

		chan_id = QMC_INT_GET_CHANNEL(int_entry);
		chan = qmc->chans[chan_id];
		if (!chan) {
			dev_err(qmc->dev, "interrupt on invalid chan %u\n", chan_id);
			goto int_next;
		}

		if (int_entry & QMC_INT_TXB)
			qmc_chan_write_done(chan);

		if (int_entry & QMC_INT_UN) {
			dev_info(qmc->dev, "intr chan %u, 0x%04x (UN)\n", chan_id,
				 int_entry);
			chan->nb_tx_underrun++;
		}

		if (int_entry & QMC_INT_BSY) {
			dev_info(qmc->dev, "intr chan %u, 0x%04x (BSY)\n", chan_id,
				 int_entry);
			chan->nb_rx_busy++;
			/* Restart the receiver if needed */
			spin_lock_irqsave(&chan->rx_lock, flags);
			if (chan->rx_pending && !chan->is_rx_stopped) {
				if (chan->mode == QMC_TRANSPARENT)
					qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
				else
					qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
				qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
				chan->is_rx_halted = false;
			} else {
				chan->is_rx_halted = true;
			}
			spin_unlock_irqrestore(&chan->rx_lock, flags);
		}

		if (int_entry & QMC_INT_RXB)
			qmc_chan_read_done(chan);

int_next:
		if (int_entry & QMC_INT_W)
			qmc->int_curr = qmc->int_table;
		else
			qmc->int_curr++;
		int_entry = qmc_read16(qmc->int_curr);
	}
}

static irqreturn_t qmc_irq_handler(int irq, void *priv)
{
	struct qmc *qmc = (struct qmc *)priv;
	u16 scce;

	scce = qmc_read16(qmc->scc_regs + SCC_SCCE);
	qmc_write16(qmc->scc_regs + SCC_SCCE, scce);

	if (unlikely(scce & SCC_SCCE_IQOV))
		dev_info(qmc->dev, "IRQ queue overflow\n");

	if (unlikely(scce & SCC_SCCE_GUN))
		dev_err(qmc->dev, "Global transmitter underrun\n");

	if (unlikely(scce & SCC_SCCE_GOV))
		dev_err(qmc->dev, "Global receiver overrun\n");

	/* normal interrupt */
	if (likely(scce & SCC_SCCE_GINT))
		qmc_irq_gint(qmc);

	return IRQ_HANDLED;
}

static int qmc_cpm1_init_resources(struct qmc *qmc, struct platform_device *pdev)
{
	struct resource *res;

	qmc->scc_regs = devm_platform_ioremap_resource_byname(pdev, "scc_regs");
	if (IS_ERR(qmc->scc_regs))
		return PTR_ERR(qmc->scc_regs);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scc_pram");
	if (!res)
		return -EINVAL;
	qmc->scc_pram_offset = res->start - get_immrbase();
	qmc->scc_pram = devm_ioremap_resource(qmc->dev, res);
	if (IS_ERR(qmc->scc_pram))
		return PTR_ERR(qmc->scc_pram);

	qmc->dpram = devm_platform_ioremap_resource_byname(pdev, "dpram");
	if (IS_ERR(qmc->dpram))
		return PTR_ERR(qmc->dpram);

	return 0;
}

static int qmc_init_resources(struct qmc *qmc, struct platform_device *pdev)
{
	return qmc_cpm1_init_resources(qmc, pdev);
}

static int qmc_cpm1_init_scc(struct qmc *qmc)
{
	u32 val;
	int ret;

	/* Connect the serial (SCC) to TSA */
	ret = tsa_serial_connect(qmc->tsa_serial);
	if (ret)
		return dev_err_probe(qmc->dev, ret, "Failed to connect TSA serial\n");

	/* Init GSMRH and GSMRL registers */
	val = SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP;
	qmc_write32(qmc->scc_regs + SCC_GSMRH, val);

	/* Enable QMC mode */
	qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_MODE_QMC);

	/* Disable and clear interrupts */
	qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000);
	qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F);

	return 0;
}

static int qmc_init_xcc(struct qmc *qmc)
{
	return qmc_cpm1_init_scc(qmc);
}

static void qmc_exit_xcc(struct qmc *qmc)
{
	/* Disconnect the serial from TSA */
	tsa_serial_disconnect(qmc->tsa_serial);
}

static int qmc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	unsigned int nb_chans;
	struct qmc *qmc;
	int irq;
	int ret;

	qmc = devm_kzalloc(&pdev->dev, sizeof(*qmc), GFP_KERNEL);
	if (!qmc)
		return -ENOMEM;

	qmc->dev = &pdev->dev;
	INIT_LIST_HEAD(&qmc->chan_head);

	qmc->tsa_serial = devm_tsa_serial_get_byphandle(qmc->dev, np, "fsl,tsa-serial");
	if (IS_ERR(qmc->tsa_serial)) {
		return dev_err_probe(qmc->dev, PTR_ERR(qmc->tsa_serial),
				     "Failed to get TSA serial\n");
	}

	ret = qmc_init_resources(qmc, pdev);
	if (ret)
		return ret;

	/* Parse channel information */
	ret = qmc_of_parse_chans(qmc, np);
	if (ret)
		return ret;

	nb_chans = qmc_nb_chans(qmc);

	/*
	 * Allocate the buffer descriptor table
	 * 8 Rx and 8 Tx descriptors per channel
	 */
	qmc->bd_size = (nb_chans * (QMC_NB_TXBDS + QMC_NB_RXBDS)) * sizeof(cbd_t);
	qmc->bd_table = dmam_alloc_coherent(qmc->dev, qmc->bd_size,
					    &qmc->bd_dma_addr, GFP_KERNEL);
	if (!qmc->bd_table) {
		dev_err(qmc->dev, "Failed to allocate bd table\n");
		return -ENOMEM;
	}
	memset(qmc->bd_table, 0, qmc->bd_size);

	qmc_write32(qmc->scc_pram + QMC_GBL_MCBASE, qmc->bd_dma_addr);

	/* Allocate the interrupt table */
	qmc->int_size = QMC_NB_INTS * sizeof(u16);
	qmc->int_table = dmam_alloc_coherent(qmc->dev, qmc->int_size,
					     &qmc->int_dma_addr, GFP_KERNEL);
	if (!qmc->int_table) {
		dev_err(qmc->dev, "Failed to allocate interrupt table\n");
		return -ENOMEM;
	}
	memset(qmc->int_table, 0, qmc->int_size);

	qmc->int_curr = qmc->int_table;
	qmc_write32(qmc->scc_pram + QMC_GBL_INTBASE, qmc->int_dma_addr);
	qmc_write32(qmc->scc_pram + QMC_GBL_INTPTR, qmc->int_dma_addr);

	/* Set MRBLR (valid for HDLC only): max MRU + max CRC */
	qmc_write16(qmc->scc_pram + QMC_GBL_MRBLR, HDLC_MAX_MRU + 4);

	qmc_write16(qmc->scc_pram + QMC_GBL_GRFTHR, 1);
	qmc_write16(qmc->scc_pram + QMC_GBL_GRFCNT, 1);

	qmc_write32(qmc->scc_pram + QMC_GBL_C_MASK32, 0xDEBB20E3);
	qmc_write16(qmc->scc_pram + QMC_GBL_C_MASK16, 0xF0B8);

	ret = qmc_setup_tsa(qmc);
	if (ret)
		return ret;

	qmc_write16(qmc->scc_pram + QMC_GBL_QMCSTATE, 0x8000);

	ret = qmc_setup_chans(qmc);
	if (ret)
		return ret;

	/* Init interrupts table */
	ret = qmc_setup_ints(qmc);
	if (ret)
		return ret;

	/* Init SCC */
	ret = qmc_init_xcc(qmc);
	if (ret)
		return ret;

	/* Set the irq handler */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_exit_xcc;
	}
	ret = devm_request_irq(qmc->dev, irq, qmc_irq_handler, 0, "qmc", qmc);
	if (ret < 0)
		goto err_exit_xcc;

	/* Enable interrupts */
	qmc_write16(qmc->scc_regs + SCC_SCCM,
		    SCC_SCCE_IQOV | SCC_SCCE_GINT | SCC_SCCE_GUN | SCC_SCCE_GOV);

	ret = qmc_finalize_chans(qmc);
	if (ret < 0)
		goto err_disable_intr;

	/* Enable transmitter and receiver */
	qmc_setbits32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	platform_set_drvdata(pdev, qmc);

	return 0;

err_disable_intr:
	qmc_write16(qmc->scc_regs + SCC_SCCM, 0);

err_exit_xcc:
	qmc_exit_xcc(qmc);
	return ret;
}

static void qmc_remove(struct platform_device *pdev)
{
	struct qmc *qmc = platform_get_drvdata(pdev);

	/* Disable transmitter and receiver */
	qmc_clrbits32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	/* Disable interrupts */
	qmc_write16(qmc->scc_regs + SCC_SCCM, 0);

	/* Exit SCC */
	qmc_exit_xcc(qmc);
}

static const struct of_device_id qmc_id_table[] = {
	{ .compatible = "fsl,cpm1-scc-qmc" },
	{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, qmc_id_table);

static struct platform_driver qmc_driver = {
	.driver = {
		.name = "fsl-qmc",
		.of_match_table = of_match_ptr(qmc_id_table),
	},
	.probe = qmc_probe,
	.remove_new = qmc_remove,
};
module_platform_driver(qmc_driver);

struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phandle_name)
{
	struct of_phandle_args out_args;
	struct platform_device *pdev;
	struct qmc_chan *qmc_chan;
	struct qmc *qmc;
	int ret;

	ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0,
					       &out_args);
	if (ret < 0)
		return ERR_PTR(ret);

	if (!of_match_node(qmc_driver.driver.of_match_table, out_args.np)) {
		of_node_put(out_args.np);
		return ERR_PTR(-EINVAL);
	}

	pdev = of_find_device_by_node(out_args.np);
	of_node_put(out_args.np);
	if (!pdev)
		return ERR_PTR(-ENODEV);

	qmc = platform_get_drvdata(pdev);
	if (!qmc) {
		platform_device_put(pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	if (out_args.args_count != 1) {
		platform_device_put(pdev);
		return ERR_PTR(-EINVAL);
	}

	if (out_args.args[0] >= ARRAY_SIZE(qmc->chans)) {
		platform_device_put(pdev);
		return ERR_PTR(-EINVAL);
	}

	qmc_chan = qmc->chans[out_args.args[0]];
	if (!qmc_chan) {
		platform_device_put(pdev);
		return ERR_PTR(-ENOENT);
	}

	return qmc_chan;
}
EXPORT_SYMBOL(qmc_chan_get_byphandle);

void qmc_chan_put(struct qmc_chan *chan)
{
	put_device(chan->qmc->dev);
}
EXPORT_SYMBOL(qmc_chan_put);

static void devm_qmc_chan_release(struct device *dev, void *res)
{
	struct qmc_chan **qmc_chan = res;

	qmc_chan_put(*qmc_chan);
}

struct qmc_chan *devm_qmc_chan_get_byphandle(struct device *dev,
					     struct device_node *np,
					     const char *phandle_name)
{
	struct qmc_chan *qmc_chan;
	struct qmc_chan **dr;

	dr = devres_alloc(devm_qmc_chan_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return ERR_PTR(-ENOMEM);

	qmc_chan = qmc_chan_get_byphandle(np, phandle_name);
	if (!IS_ERR(qmc_chan)) {
		*dr = qmc_chan;
		devres_add(dev, dr);
	} else {
		devres_free(dr);
	}

	return qmc_chan;
}
EXPORT_SYMBOL(devm_qmc_chan_get_byphandle);
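
/*
 * Typical consumer usage (sketch; 'dev', 'np' and the "fsl,qmc-chan"
 * property name are illustrative):
 *
 *	chan = devm_qmc_chan_get_byphandle(dev, np, "fsl,qmc-chan");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */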

MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
MODULE_DESCRIPTION("CPM QMC driver");
MODULE_LICENSE("GPL");