Lines matching +full:bd +full:- +full:address

Matching lines from the Microchip PIC32 SQI (quad-SPI) controller driver,
grouped below by the function or structure they appear in.
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/dma-mapping.h>
/* interrupt status and BD-processor control bits */
#define PESQI_BDDONE            BIT(9)  /* BD processing complete */
#define PESQI_BDP_START         BIT(2)  /* start BD processor */

/* fields of the per-descriptor control word (bd_ctrl) */
#define BD_CBD_INT_EN           BIT(16) /* current BD is processed */
#define BD_CS_DEASSERT          BIT(30) /* de-assert CS after current BD */
#define BD_EN                   BIT(31) /* BD owned by H/W */
/*
 * struct ring_desc - Representation of SQI ring descriptor
 * @bd: PESQI controller buffer descriptor
 * @bd_dma: DMA address of PESQI controller buffer descriptor
 */
        struct buf_desc *bd;    /* member of struct ring_desc */

        void *bd;               /* member of the controller state; CPU
                                 * pointer to the coherent BD memory
                                 * allocated at probe time */
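The two bd members above belong to different structures: the per-descriptor
struct ring_desc and the controller state that owns the whole coherent
allocation. A sketch of how the types plausibly fit together, inferred only
from the fields this listing touches (bd_ctrl, bd_status, bd_addr, bd_nextp,
bd_dma, xfer_len); the exact field order and widths are assumptions:

/* Hardware buffer descriptor as the BD processor walks it (assumed
 * layout: four 32-bit words, matching the accesses shown below).
 */
struct buf_desc {
        u32 bd_ctrl;    /* BD_EN, BD_LAST, lane select, buffer length */
        u32 bd_status;  /* written back by hardware on completion */
        u32 bd_addr;    /* DMA address of the data buffer */
        u32 bd_nextp;   /* DMA address of the next BD in the chain */
};

/* Software-side handle for one hardware BD (fields inferred from use) */
struct ring_desc {
        struct list_head list;  /* linkage on bd_list_free/bd_list_used */
        struct buf_desc *bd;    /* CPU view of the hardware BD */
        dma_addr_t bd_dma;      /* device view of the same BD */
        u32 xfer_len;           /* bytes described by this BD */
};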
/* in pic32_sqi_set_clk_rate(): divide the base clock down to the
 * requested SCK rate, then poll until the clock reports stable.
 */
        div = clk_get_rate(sqi->base_clk) / (2 * sck);

        val = readl(sqi->regs + PESQI_CLK_CTRL_REG);
        /* (divider field of val updated here) */
        writel(val, sqi->regs + PESQI_CLK_CTRL_REG);

        return readl_poll_timeout(sqi->regs + PESQI_CLK_CTRL_REG, val,
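The divider formula is worth a quick check. A standalone sketch with
illustrative clock rates (the numbers are examples, not hardware values):

#include <stdio.h>

/* Userspace check of the SCK divider formula used above:
 * div = base_clk / (2 * sck). Rates are illustrative only.
 */
int main(void)
{
        unsigned long base_clk = 200000000UL;   /* assumed 200 MHz base */
        unsigned long sck = 25000000UL;         /* requested 25 MHz SCK */
        unsigned long div = base_clk / (2 * sck);

        printf("div = %lu\n", div);             /* prints: div = 4 */
        return 0;
}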
/* in pic32_sqi_enable_int() */
        writel(mask, sqi->regs + PESQI_INT_ENABLE_REG);
        /* INT_SIGEN works as interrupt-gate to the INTR line */
        writel(mask, sqi->regs + PESQI_INT_SIGEN_REG);

/* in pic32_sqi_disable_int() */
        writel(0, sqi->regs + PESQI_INT_ENABLE_REG);
        writel(0, sqi->regs + PESQI_INT_SIGEN_REG);
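Both helpers write the same mask to the enable and signal-enable registers,
so an event must be both enabled and gated before it can raise the INTR
line. A plausible completion of the pair; the mask bits other than
PESQI_BDDONE are assumptions:

static void pic32_sqi_enable_int(struct pic32_sqi *sqi)
{
        /* assumed event set: DMA error, BD done, packet complete */
        u32 mask = PESQI_DMAERR | PESQI_BDDONE | PESQI_PKTCOMP;

        writel(mask, sqi->regs + PESQI_INT_ENABLE_REG);
        /* INT_SIGEN works as interrupt-gate to INTR line */
        writel(mask, sqi->regs + PESQI_INT_SIGEN_REG);
}

static void pic32_sqi_disable_int(struct pic32_sqi *sqi)
{
        writel(0, sqi->regs + PESQI_INT_ENABLE_REG);
        writel(0, sqi->regs + PESQI_INT_SIGEN_REG);
}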
/* in pic32_sqi_isr() */
        enable = readl(sqi->regs + PESQI_INT_ENABLE_REG);
        status = readl(sqi->regs + PESQI_INT_STAT_REG);

        complete(&sqi->xfer_done);

        /* write back the updated enable mask */
        writel(enable, sqi->regs + PESQI_INT_ENABLE_REG);
/* in ring_desc_get() */
        if (list_empty(&sqi->bd_list_free))

        rdesc = list_first_entry(&sqi->bd_list_free, struct ring_desc, list);
        list_move_tail(&rdesc->list, &sqi->bd_list_used);

/* in ring_desc_put() */
        list_move(&rdesc->list, &sqi->bd_list_free);
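Taken together, these four fragments are the entire ring discipline:
descriptors migrate from the free list to the used list when claimed, and
back when recycled. A sketch of the complete helpers, assuming the obvious
early-return shape:

static struct ring_desc *ring_desc_get(struct pic32_sqi *sqi)
{
        struct ring_desc *rdesc;

        if (list_empty(&sqi->bd_list_free))
                return NULL;    /* ring exhausted (assumed behaviour) */

        rdesc = list_first_entry(&sqi->bd_list_free, struct ring_desc, list);
        list_move_tail(&rdesc->list, &sqi->bd_list_used);
        return rdesc;
}

static void ring_desc_put(struct pic32_sqi *sqi, struct ring_desc *rdesc)
{
        list_move(&rdesc->list, &sqi->bd_list_free);
}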
/* in pic32_sqi_one_transfer() */
        struct spi_device *spi = mesg->spi;
        struct buf_desc *bd;

        /* half-duplex: select transfer buffer, direction and lane */
        if (xfer->rx_buf) {
                nbits = xfer->rx_nbits;
                sgl = xfer->rx_sg.sgl;
                nents = xfer->rx_sg.nents;
        } else {
                nbits = xfer->tx_nbits;
                sgl = xfer->tx_sg.sgl;
                nents = xfer->tx_sg.nents;
        }

        if (spi->mode & SPI_LSB_FIRST)
        bd = rdesc->bd;

        /* BD CTRL: length */
        rdesc->xfer_len = sg_dma_len(sg);
        bd->bd_ctrl = bd_ctrl;
        bd->bd_ctrl |= rdesc->xfer_len;

        /* BD STAT */
        bd->bd_status = 0;

        /* BD BUFFER ADDRESS */
        bd->bd_addr = sg->dma_address;
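These assignments are the body of a per-scatterlist-segment loop: each
DMA-mapped segment gets one hardware BD claimed from the free ring. A
sketch of the plausible loop shape (the for_each_sg structure and the
failure handling are assumptions):

        for_each_sg(sgl, sg, nents, i) {
                rdesc = ring_desc_get(sqi);     /* claim a free BD */
                if (!rdesc)
                        break;                  /* ring exhausted (assumed) */

                bd = rdesc->bd;

                /* control word: common template bits | segment length */
                rdesc->xfer_len = sg_dma_len(sg);
                bd->bd_ctrl = bd_ctrl | rdesc->xfer_len;

                bd->bd_status = 0;              /* hardware writes status */
                bd->bd_addr = sg->dma_address;  /* data buffer, device view */
        }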
/* in pic32_sqi_prepare_hardware(): enable the controller and its clock */
        pic32_setbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
        pic32_setbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
/* in pic32_sqi_one_message() */
        struct spi_device *spi = msg->spi;

        reinit_completion(&sqi->xfer_done);
        msg->actual_length = 0;

        /* Device-specific speed and mode changes are best handled
         * while switching chip-select, so both are cached and only
         * re-applied when the target spi_device changes.
         */
        if (sqi->cur_spi != spi) {
                /* set spi speed */
                if (sqi->cur_speed != spi->max_speed_hz) {
                        sqi->cur_speed = spi->max_speed_hz;
                        ret = pic32_sqi_set_clk_rate(sqi, spi->max_speed_hz);
                        if (ret)
                                dev_warn(&spi->dev, "set_clk, %d\n", ret);
                }

                /* set spi mode */
                mode = spi->mode & (SPI_MODE_3 | SPI_LSB_FIRST);
                if (sqi->cur_mode != mode) {
                        val = readl(sqi->regs + PESQI_CONF_REG);
                        /* (CPOL/CPHA and LSB-first bits updated here) */
                        writel(val, sqi->regs + PESQI_CONF_REG);

                        sqi->cur_mode = mode;
                }
                sqi->cur_spi = spi;
        }
        /* prepare hardware desc-list (BD chain) for the transfer(s) */
        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                ret = pic32_sqi_one_transfer(sqi, msg, xfer);
                if (ret) {
                        dev_err(&spi->dev, "xfer %p err\n", xfer);
                        goto xfer_out;  /* unwind label; name assumed */
                }
        }

        /* terminate the chain: last BD ends the packet and drops CS */
        rdesc = list_last_entry(&sqi->bd_list_used, struct ring_desc, list);
        rdesc->bd->bd_ctrl |= BD_LAST | BD_CS_DEASSERT |

        /* point the DMA engine at the first BD of the list */
        rdesc = list_first_entry(&sqi->bd_list_used, struct ring_desc, list);
        writel(rdesc->bd_dma, sqi->regs + PESQI_BD_BASE_ADDR_REG);
        writel(val, sqi->regs + PESQI_BD_CTRL_REG);     /* start BD processor */

        /* wait for completion, signalled from pic32_sqi_isr() */
        timeout = wait_for_completion_timeout(&sqi->xfer_done, 5 * HZ);
        if (timeout == 0) {
                dev_err(&sqi->host->dev, "wait timed out/interrupted\n");
                ret = -ETIMEDOUT;
                msg->status = ret;
        } else {
                msg->status = 0;
        }

        writel(0, sqi->regs + PESQI_BD_CTRL_REG);       /* stop BD processor */

        /* credit lengths, recycle used descriptors ('next' cursor assumed) */
        list_for_each_entry_safe(rdesc, next, &sqi->bd_list_used, list) {
                msg->actual_length += rdesc->xfer_len;
                ring_desc_put(sqi, rdesc);
        }

        spi_finalize_current_message(spi->controller);
/* in pic32_sqi_unprepare_hardware(): reverse of prepare */
        pic32_clrbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
        pic32_clrbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
/* in ring_desc_ring_alloc() */
        struct buf_desc *bd;

        /* allocate coherent DMA memory for the hardware BDs */
        sqi->bd = dma_alloc_coherent(&sqi->host->dev,
                                     sizeof(*bd) * PESQI_BD_COUNT,
                                     &sqi->bd_dma, GFP_KERNEL);
        if (!sqi->bd) {
                dev_err(&sqi->host->dev, "failed allocating dma buffer\n");
                return -ENOMEM;
        }

        /* allocate the software ring descriptors */
        sqi->ring = kcalloc(PESQI_BD_COUNT, sizeof(*rdesc), GFP_KERNEL);
        if (!sqi->ring) {
                dma_free_coherent(&sqi->host->dev,
                                  sizeof(*bd) * PESQI_BD_COUNT,
                                  sqi->bd, sqi->bd_dma);
                return -ENOMEM;
        }

        bd = (struct buf_desc *)sqi->bd;

        INIT_LIST_HEAD(&sqi->bd_list_free);
        INIT_LIST_HEAD(&sqi->bd_list_used);

        /* initialize ring descriptors: pair each with its hardware BD */
        for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT; i++, rdesc++) {
                INIT_LIST_HEAD(&rdesc->list);
                rdesc->bd = &bd[i];
                rdesc->bd_dma = sqi->bd_dma + (void *)&bd[i] - (void *)bd;
                list_add_tail(&rdesc->list, &sqi->bd_list_free);
        }

        /* prepare BDs: chain each to its successor, terminate the last */
        for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT - 1; i++)
                bd[i].bd_nextp = rdesc[i + 1].bd_dma;
        bd[PESQI_BD_COUNT - 1].bd_nextp = 0;
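The bd_dma pointer arithmetic above is just "base DMA address plus the
element's byte offset", i.e. sqi->bd_dma + i * sizeof(*bd). A standalone
illustration of the offset math and the chain termination (addresses are
made up):

#include <stdio.h>
#include <stdint.h>

/* mirrors the four-word descriptor layout assumed earlier */
struct buf_desc { uint32_t bd_ctrl, bd_status, bd_addr, bd_nextp; };

int main(void)
{
        uint64_t base_dma = 0x1f880000;         /* pretend coherent base */
        struct buf_desc bd[4];

        /* chain: each bd_nextp holds the device address of its successor */
        for (int i = 0; i < 3; i++)
                bd[i].bd_nextp = (uint32_t)(base_dma + (i + 1) * sizeof(*bd));
        bd[3].bd_nextp = 0;                     /* terminate the list */

        for (int i = 0; i < 4; i++)
                printf("bd[%d]: dma=0x%llx next=0x%x\n", i,
                       (unsigned long long)(base_dma + i * sizeof(*bd)),
                       bd[i].bd_nextp);
        return 0;
}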
/* in ring_desc_ring_free() */
        dma_free_coherent(&sqi->host->dev,
                          sizeof(struct buf_desc) * PESQI_BD_COUNT,
                          sqi->bd, sqi->bd_dma);
        kfree(sqi->ring);
/* in pic32_sqi_hw_init() */
        /* Soft-reset of the PESQI controller triggers an interrupt. */

        /* assert soft-reset */
        writel(PESQI_SOFT_RESET, sqi->regs + PESQI_CONF_REG);

        /* wait until the soft-reset bit self-clears */
        readl_poll_timeout_atomic(sqi->regs + PESQI_CONF_REG, val,

        /* command FIFO threshold */
        val = readl(sqi->regs + PESQI_CMD_THRES_REG);
        /* (threshold fields updated here) */
        writel(val, sqi->regs + PESQI_CMD_THRES_REG);

        /* interrupt FIFO threshold */
        val = readl(sqi->regs + PESQI_INT_THRES_REG);
        writel(val, sqi->regs + PESQI_INT_THRES_REG);

        /* select DMA (BD) mode of operation */
        val = readl(sqi->regs + PESQI_CONF_REG);
        writel(val, sqi->regs + PESQI_CONF_REG);

        /* DATAEN - data lanes SQID0..SQID3 */
        /* CSEN - all chip-selects */
        writel(val, sqi->regs + PESQI_CONF_REG);

        /* disable BD polling */
        writel(0, sqi->regs + PESQI_BD_POLL_CTRL_REG);

        /* invalidate cached per-device state */
        sqi->cur_speed = 0;
        sqi->cur_mode = -1;
/* in pic32_sqi_probe() */
        host = spi_alloc_host(&pdev->dev, sizeof(*sqi));
        if (!host)
                return -ENOMEM;

        sqi = spi_controller_get_devdata(host);
        sqi->host = host;

        sqi->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(sqi->regs)) {
                ret = PTR_ERR(sqi->regs);
                goto err_free_host;     /* unwind label; name assumed */
        }

        sqi->irq = platform_get_irq(pdev, 0);
        if (sqi->irq < 0) {
                ret = sqi->irq;
                goto err_free_host;
        }
        sqi->sys_clk = devm_clk_get(&pdev->dev, "reg_ck");
        if (IS_ERR(sqi->sys_clk)) {
                ret = PTR_ERR(sqi->sys_clk);
                dev_err(&pdev->dev, "no sys_clk ?\n");
                goto err_free_host;
        }

        sqi->base_clk = devm_clk_get(&pdev->dev, "spi_ck");
        if (IS_ERR(sqi->base_clk)) {
                ret = PTR_ERR(sqi->base_clk);
                dev_err(&pdev->dev, "no base clk ?\n");
                goto err_free_host;
        }

        ret = clk_prepare_enable(sqi->sys_clk);
        if (ret) {
                dev_err(&pdev->dev, "sys clk enable failed\n");
                goto err_free_host;
        }

        ret = clk_prepare_enable(sqi->base_clk);
        if (ret) {
                dev_err(&pdev->dev, "base clk enable failed\n");
                clk_disable_unprepare(sqi->sys_clk);
                goto err_free_host;
        }

        init_completion(&sqi->xfer_done);
        ret = ring_desc_ring_alloc(sqi);
        if (ret) {
                dev_err(&pdev->dev, "ring alloc failed\n");
                goto err_disable_clk;   /* unwind label; name assumed */
        }

        /* install interrupt handler */
        ret = request_irq(sqi->irq, pic32_sqi_isr, 0,
                          dev_name(&pdev->dev), sqi);
        if (ret < 0) {
                dev_err(&pdev->dev, "request_irq(%d), failed\n", sqi->irq);
                goto err_free_ring;     /* unwind label; name assumed */
        }
        /* register host */
        host->num_chipselect = 2;
        host->max_speed_hz = clk_get_rate(sqi->base_clk);
        host->dma_alignment = 32;
        host->max_dma_len = PESQI_BD_BUF_LEN_MAX;
        host->dev.of_node = pdev->dev.of_node;
        host->mode_bits = SPI_MODE_3 | SPI_MODE_0 | SPI_TX_DUAL |
                          SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
                          /* continuation reconstructed: dual+quad RX/TX */
        host->flags = SPI_CONTROLLER_HALF_DUPLEX;
        host->can_dma = pic32_sqi_can_dma;
        host->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
        host->transfer_one_message = pic32_sqi_one_message;
        host->prepare_transfer_hardware = pic32_sqi_prepare_hardware;
        host->unprepare_transfer_hardware = pic32_sqi_unprepare_hardware;
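host->can_dma points at a predicate the search did not match. Given that
max_dma_len is capped at PESQI_BD_BUF_LEN_MAX above, a plausible shape
(the body is an assumption):

static bool pic32_sqi_can_dma(struct spi_controller *host,
                              struct spi_device *spi,
                              struct spi_transfer *x)
{
        /* assumed: DMA-map only what fits a single BD data buffer */
        return x->len <= PESQI_BD_BUF_LEN_MAX;
}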
        ret = devm_spi_register_controller(&pdev->dev, host);
        if (ret) {
                dev_err(&host->dev, "failed registering spi host\n");
                free_irq(sqi->irq, sqi);
                goto err_free_ring;
        }

        return 0;

        /* failure unwind, in reverse order of setup (labels assumed) */
err_free_ring:
        ring_desc_ring_free(sqi);
err_disable_clk:
        clk_disable_unprepare(sqi->base_clk);
        clk_disable_unprepare(sqi->sys_clk);
err_free_host:
        spi_controller_put(host);
        return ret;
/* in pic32_sqi_remove(): release the IRQ, then the clocks */
        free_irq(sqi->irq, sqi);

        clk_disable_unprepare(sqi->base_clk);
        clk_disable_unprepare(sqi->sys_clk);
/* device-tree match table and platform driver name */
        { .compatible = "microchip,pic32mzda-sqi", },

        .name = "sqi-pic32",
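These last two fragments belong to the usual platform-driver registration
glue. A sketch of the standard boilerplate around them (the table and
struct names are assumptions; the compatible string and driver name are
from the listing):

static const struct of_device_id pic32_sqi_of_ids[] = {
        { .compatible = "microchip,pic32mzda-sqi", },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pic32_sqi_of_ids);

static struct platform_driver pic32_sqi_driver = {
        .driver = {
                .name = "sqi-pic32",
                .of_match_table = pic32_sqi_of_ids,
        },
        .probe = pic32_sqi_probe,
        .remove = pic32_sqi_remove,
};

module_platform_driver(pic32_sqi_driver);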