// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.

#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_opp.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>


#define QSPI_NUM_CS		2
#define QSPI_BYTES_PER_WORD	4

#define MSTR_CONFIG		0x0000
#define FULL_CYCLE_MODE		BIT(3)
#define FB_CLK_EN		BIT(4)
#define PIN_HOLDN		BIT(6)
#define PIN_WPN			BIT(7)
#define DMA_ENABLE		BIT(8)
#define BIG_ENDIAN_MODE		BIT(9)
#define SPI_MODE_MSK		0xc00
#define SPI_MODE_SHFT		10
#define CHIP_SELECT_NUM		BIT(12)
#define SBL_EN			BIT(13)
#define LPA_BASE_MSK		0x3c000
#define LPA_BASE_SHFT		14
#define TX_DATA_DELAY_MSK	0xc0000
#define TX_DATA_DELAY_SHFT	18
#define TX_CLK_DELAY_MSK	0x300000
#define TX_CLK_DELAY_SHFT	20
#define TX_CS_N_DELAY_MSK	0xc00000
#define TX_CS_N_DELAY_SHFT	22
#define TX_DATA_OE_DELAY_MSK	0x3000000
#define TX_DATA_OE_DELAY_SHFT	24

#define AHB_MASTER_CFG				0x0004
#define HMEM_TYPE_START_MID_TRANS_MSK		0x7
#define HMEM_TYPE_START_MID_TRANS_SHFT		0
#define HMEM_TYPE_LAST_TRANS_MSK		0x38
#define HMEM_TYPE_LAST_TRANS_SHFT		3
#define USE_HMEMTYPE_LAST_ON_DESC_OR_CHAIN_MSK	0xc0
#define USE_HMEMTYPE_LAST_ON_DESC_OR_CHAIN_SHFT	6
#define HMEMTYPE_READ_TRANS_MSK			0x700
#define HMEMTYPE_READ_TRANS_SHFT		8
#define HSHARED					BIT(11)
#define HINNERSHARED				BIT(12)

#define MSTR_INT_EN		0x000C
#define MSTR_INT_STATUS		0x0010
#define RESP_FIFO_UNDERRUN	BIT(0)
#define RESP_FIFO_NOT_EMPTY	BIT(1)
#define RESP_FIFO_RDY		BIT(2)
#define HRESP_FROM_NOC_ERR	BIT(3)
#define WR_FIFO_EMPTY		BIT(9)
#define WR_FIFO_FULL		BIT(10)
#define WR_FIFO_OVERRUN		BIT(11)
#define TRANSACTION_DONE	BIT(16)
#define DMA_CHAIN_DONE		BIT(31)
#define QSPI_ERR_IRQS		(RESP_FIFO_UNDERRUN | HRESP_FROM_NOC_ERR | \
				 WR_FIFO_OVERRUN)
#define QSPI_ALL_IRQS		(QSPI_ERR_IRQS | RESP_FIFO_RDY | \
				 WR_FIFO_EMPTY | WR_FIFO_FULL | \
				 TRANSACTION_DONE | DMA_CHAIN_DONE)

#define PIO_XFER_CTRL		0x0014
#define REQUEST_COUNT_MSK	0xffff

#define PIO_XFER_CFG		0x0018
#define TRANSFER_DIRECTION	BIT(0)
#define MULTI_IO_MODE_MSK	0xe
#define MULTI_IO_MODE_SHFT	1
#define TRANSFER_FRAGMENT	BIT(8)
#define SDR_1BIT		1
#define SDR_2BIT		2
#define SDR_4BIT		3
#define DDR_1BIT		5
#define DDR_2BIT		6
#define DDR_4BIT		7
#define DMA_DESC_SINGLE_SPI	1
#define DMA_DESC_DUAL_SPI	2
#define DMA_DESC_QUAD_SPI	3

#define PIO_XFER_STATUS		0x001c
#define WR_FIFO_BYTES_MSK	0xffff0000
#define WR_FIFO_BYTES_SHFT	16

#define PIO_DATAOUT_1B		0x0020
#define PIO_DATAOUT_4B		0x0024

#define RD_FIFO_CFG		0x0028
#define CONTINUOUS_MODE		BIT(0)

#define RD_FIFO_STATUS	0x002c
#define FIFO_EMPTY	BIT(11)
#define WR_CNTS_MSK	0x7f0
#define WR_CNTS_SHFT	4
#define RDY_64BYTE	BIT(3)
#define RDY_32BYTE	BIT(2)
#define RDY_16BYTE	BIT(1)
#define FIFO_RDY	BIT(0)

#define RD_FIFO_RESET		0x0030
#define RESET_FIFO	BIT(0)

#define NEXT_DMA_DESC_ADDR	0x0040
#define CURRENT_DMA_DESC_ADDR	0x0044
#define CURRENT_MEM_ADDR	0x0048

#define CUR_MEM_ADDR		0x0048
#define HW_VERSION		0x004c
#define RD_FIFO			0x0050
#define SAMPLING_CLK_CFG	0x0090
#define SAMPLING_CLK_STATUS	0x0094

#define QSPI_ALIGN_REQ	32

enum qspi_dir {
	QSPI_READ,
	QSPI_WRITE,
};
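
/*
 * Command descriptor consumed by the QSPI DMA engine. One descriptor is
 * built per scatter-gather entry and descriptors are chained together
 * through next_descriptor.
 */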
struct qspi_cmd_desc {
	u32 data_address;
	u32 next_descriptor;
	u32 direction:1;
	u32 multi_io_mode:3;
	u32 reserved1:4;
	u32 fragment:1;
	u32 reserved2:7;
	u32 length:16;
};

struct qspi_xfer {
	union {
		const void *tx_buf;
		void *rx_buf;
	};
	unsigned int rem_bytes;
	unsigned int buswidth;
	enum qspi_dir dir;
	bool is_last;
};

enum qspi_clocks {
	QSPI_CLK_CORE,
	QSPI_CLK_IFACE,
	QSPI_NUM_CLKS
};

/*
 * Number of entries in the sgt returned from the spi framework that
 * will be supported. Can be modified as required.
 * In practice, given max_dma_len is 64KB, the number of
 * entries is not expected to exceed 1.
 */
#define QSPI_MAX_SG 5

struct qcom_qspi {
	void __iomem *base;
	struct device *dev;
	struct clk_bulk_data *clks;
	struct qspi_xfer xfer;
	struct dma_pool *dma_cmd_pool;
	dma_addr_t dma_cmd_desc[QSPI_MAX_SG];
	void *virt_cmd_desc[QSPI_MAX_SG];
	unsigned int n_cmd_desc;
	struct icc_path *icc_path_cpu_to_qspi;
	unsigned long last_speed;
	/* Lock to protect data accessed by IRQs */
	spinlock_t lock;
};

static u32 qspi_buswidth_to_iomode(struct qcom_qspi *ctrl,
				   unsigned int buswidth)
{
	switch (buswidth) {
	case 1:
		return SDR_1BIT;
	case 2:
		return SDR_2BIT;
	case 4:
		return SDR_4BIT;
	default:
		dev_warn_once(ctrl->dev,
			      "Unexpected bus width: %u\n", buswidth);
		return SDR_1BIT;
	}
}

static void qcom_qspi_pio_xfer_cfg(struct qcom_qspi *ctrl)
{
	u32 pio_xfer_cfg;
	u32 iomode;
	const struct qspi_xfer *xfer;

	xfer = &ctrl->xfer;
	pio_xfer_cfg = readl(ctrl->base + PIO_XFER_CFG);
	pio_xfer_cfg &= ~TRANSFER_DIRECTION;
	pio_xfer_cfg |= xfer->dir;
	if (xfer->is_last)
		pio_xfer_cfg &= ~TRANSFER_FRAGMENT;
	else
		pio_xfer_cfg |= TRANSFER_FRAGMENT;
	pio_xfer_cfg &= ~MULTI_IO_MODE_MSK;
	iomode = qspi_buswidth_to_iomode(ctrl, xfer->buswidth);
	pio_xfer_cfg |= iomode << MULTI_IO_MODE_SHFT;

	writel(pio_xfer_cfg, ctrl->base + PIO_XFER_CFG);
}

static void qcom_qspi_pio_xfer_ctrl(struct qcom_qspi *ctrl)
{
	u32 pio_xfer_ctrl;

	pio_xfer_ctrl = readl(ctrl->base + PIO_XFER_CTRL);
	pio_xfer_ctrl &= ~REQUEST_COUNT_MSK;
	pio_xfer_ctrl |= ctrl->xfer.rem_bytes;
	writel(pio_xfer_ctrl, ctrl->base + PIO_XFER_CTRL);
}

static void qcom_qspi_pio_xfer(struct qcom_qspi *ctrl)
{
	u32 ints;

	qcom_qspi_pio_xfer_cfg(ctrl);

	/* Ack any previous interrupts that might be hanging around */
	writel(QSPI_ALL_IRQS, ctrl->base + MSTR_INT_STATUS);

	/* Setup new interrupts */
	if (ctrl->xfer.dir == QSPI_WRITE)
		ints = QSPI_ERR_IRQS | WR_FIFO_EMPTY;
	else
		ints = QSPI_ERR_IRQS | RESP_FIFO_RDY;
	writel(ints, ctrl->base + MSTR_INT_EN);

	/* Kick off the transfer */
	qcom_qspi_pio_xfer_ctrl(ctrl);
}
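
/*
 * Abort handler invoked by the SPI core on a failed transfer: mask and
 * clear the controller interrupts and free any command descriptors left
 * over from a DMA transfer.
 */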
static void qcom_qspi_handle_err(struct spi_controller *host,
				 struct spi_message *msg)
{
	u32 int_status;
	struct qcom_qspi *ctrl = spi_controller_get_devdata(host);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctrl->lock, flags);
	writel(0, ctrl->base + MSTR_INT_EN);
	int_status = readl(ctrl->base + MSTR_INT_STATUS);
	writel(int_status, ctrl->base + MSTR_INT_STATUS);
	ctrl->xfer.rem_bytes = 0;

	/* free cmd descriptors if they are around (DMA mode) */
	for (i = 0; i < ctrl->n_cmd_desc; i++)
		dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
			      ctrl->dma_cmd_desc[i]);
	ctrl->n_cmd_desc = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);
}

static int qcom_qspi_set_speed(struct qcom_qspi *ctrl, unsigned long speed_hz)
{
	int ret;
	unsigned int avg_bw_cpu;

	if (speed_hz == ctrl->last_speed)
		return 0;

	/* In regular operation (SBL_EN=1) core must be 4x transfer clock */
	ret = dev_pm_opp_set_rate(ctrl->dev, speed_hz * 4);
	if (ret) {
		dev_err(ctrl->dev, "Failed to set core clk %d\n", ret);
		return ret;
	}

	/*
	 * Set BW quota for CPU.
	 * We don't have an explicit peak requirement, so keep it equal to
	 * avg_bw.
	 */
	avg_bw_cpu = Bps_to_icc(speed_hz);
	ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, avg_bw_cpu, avg_bw_cpu);
	if (ret) {
		dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
			__func__, ret);
		return ret;
	}

	ctrl->last_speed = speed_hz;

	return 0;
}
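
/*
 * Allocate one command descriptor from the DMA pool for the given data
 * buffer, record it in the controller's descriptor arrays and link it
 * after the previous descriptor in the chain, if any.
 */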
static int qcom_qspi_alloc_desc(struct qcom_qspi *ctrl, dma_addr_t dma_ptr,
				uint32_t n_bytes)
{
	struct qspi_cmd_desc *virt_cmd_desc, *prev;
	dma_addr_t dma_cmd_desc;

	/* allocate for dma cmd descriptor */
	virt_cmd_desc = dma_pool_alloc(ctrl->dma_cmd_pool, GFP_ATOMIC | __GFP_ZERO, &dma_cmd_desc);
	if (!virt_cmd_desc) {
		dev_warn_once(ctrl->dev, "Couldn't find memory for descriptor\n");
		return -EAGAIN;
	}

	ctrl->virt_cmd_desc[ctrl->n_cmd_desc] = virt_cmd_desc;
	ctrl->dma_cmd_desc[ctrl->n_cmd_desc] = dma_cmd_desc;
	ctrl->n_cmd_desc++;

	/* setup cmd descriptor */
	virt_cmd_desc->data_address = dma_ptr;
	virt_cmd_desc->direction = ctrl->xfer.dir;
	virt_cmd_desc->multi_io_mode = qspi_buswidth_to_iomode(ctrl, ctrl->xfer.buswidth);
	virt_cmd_desc->fragment = !ctrl->xfer.is_last;
	virt_cmd_desc->length = n_bytes;

	/* update previous descriptor */
	if (ctrl->n_cmd_desc >= 2) {
		prev = (ctrl->virt_cmd_desc)[ctrl->n_cmd_desc - 2];
		prev->next_descriptor = dma_cmd_desc;
		prev->fragment = 1;
	}

	return 0;
}

static int qcom_qspi_setup_dma_desc(struct qcom_qspi *ctrl,
				    struct spi_transfer *xfer)
{
	int ret;
	struct sg_table *sgt;
	dma_addr_t dma_ptr_sg;
	unsigned int dma_len_sg;
	int i;

	if (ctrl->n_cmd_desc) {
		dev_err(ctrl->dev, "Remnant dma buffers n_cmd_desc-%d\n", ctrl->n_cmd_desc);
		return -EIO;
	}

	sgt = (ctrl->xfer.dir == QSPI_READ) ? &xfer->rx_sg : &xfer->tx_sg;
	if (!sgt->nents || sgt->nents > QSPI_MAX_SG) {
		dev_warn_once(ctrl->dev, "Cannot handle %d entries in scatter list\n", sgt->nents);
		return -EAGAIN;
	}

	for (i = 0; i < sgt->nents; i++) {
		dma_ptr_sg = sg_dma_address(sgt->sgl + i);
		dma_len_sg = sg_dma_len(sgt->sgl + i);
		if (!IS_ALIGNED(dma_ptr_sg, QSPI_ALIGN_REQ)) {
			dev_warn_once(ctrl->dev, "dma_address not aligned to %d\n", QSPI_ALIGN_REQ);
			return -EAGAIN;
		}
		/*
		 * When reading with DMA the controller writes to memory one
		 * word at a time. If the length isn't a multiple of 4 bytes
		 * then the controller can clobber whatever lies beyond the
		 * end of the buffer. Fall back to PIO to be safe.
		 */
		if (ctrl->xfer.dir == QSPI_READ && (dma_len_sg & 0x03)) {
			dev_warn_once(ctrl->dev, "fallback to PIO for read of size %#010x\n",
				      dma_len_sg);
			return -EAGAIN;
		}
	}

	for (i = 0; i < sgt->nents; i++) {
		dma_ptr_sg = sg_dma_address(sgt->sgl + i);
		dma_len_sg = sg_dma_len(sgt->sgl + i);

		ret = qcom_qspi_alloc_desc(ctrl, dma_ptr_sg, dma_len_sg);
		if (ret)
			goto cleanup;
	}
	return 0;

cleanup:
	for (i = 0; i < ctrl->n_cmd_desc; i++)
		dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
			      ctrl->dma_cmd_desc[i]);
	ctrl->n_cmd_desc = 0;
	return ret;
}

static void qcom_qspi_dma_xfer(struct qcom_qspi *ctrl)
{
	/* Setup new interrupts */
	writel(DMA_CHAIN_DONE, ctrl->base + MSTR_INT_EN);

	/* kick off transfer */
	writel((u32)((ctrl->dma_cmd_desc)[0]), ctrl->base + NEXT_DMA_DESC_ADDR);
}

/* Switch to DMA if transfer length exceeds this */
#define QSPI_MAX_BYTES_FIFO 64

static bool qcom_qspi_can_dma(struct spi_controller *ctlr,
			      struct spi_device *slv, struct spi_transfer *xfer)
{
	return xfer->len > QSPI_MAX_BYTES_FIFO;
}

static int qcom_qspi_transfer_one(struct spi_controller *host,
				  struct spi_device *slv,
				  struct spi_transfer *xfer)
{
	struct qcom_qspi *ctrl = spi_controller_get_devdata(host);
	int ret;
	unsigned long speed_hz;
	unsigned long flags;
	u32 mstr_cfg;

	speed_hz = slv->max_speed_hz;
	if (xfer->speed_hz)
		speed_hz = xfer->speed_hz;

	ret = qcom_qspi_set_speed(ctrl, speed_hz);
	if (ret)
		return ret;

	spin_lock_irqsave(&ctrl->lock, flags);
	mstr_cfg = readl(ctrl->base + MSTR_CONFIG);

	/* We are half duplex, so either rx or tx will be set */
	if (xfer->rx_buf) {
		ctrl->xfer.dir = QSPI_READ;
		ctrl->xfer.buswidth = xfer->rx_nbits;
		ctrl->xfer.rx_buf = xfer->rx_buf;
	} else {
		ctrl->xfer.dir = QSPI_WRITE;
		ctrl->xfer.buswidth = xfer->tx_nbits;
		ctrl->xfer.tx_buf = xfer->tx_buf;
	}
	ctrl->xfer.is_last = list_is_last(&xfer->transfer_list,
					  &host->cur_msg->transfers);
	ctrl->xfer.rem_bytes = xfer->len;

	if (xfer->rx_sg.nents || xfer->tx_sg.nents) {
		/* do DMA transfer */
		if (!(mstr_cfg & DMA_ENABLE)) {
			mstr_cfg |= DMA_ENABLE;
			writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
		}

		ret = qcom_qspi_setup_dma_desc(ctrl, xfer);
		if (ret != -EAGAIN) {
			if (!ret) {
				dma_wmb();
				qcom_qspi_dma_xfer(ctrl);
			}
			goto exit;
		}
		dev_warn_once(ctrl->dev, "DMA failure, falling back to PIO\n");
		ret = 0; /* We'll retry w/ PIO */
	}

	if (mstr_cfg & DMA_ENABLE) {
		mstr_cfg &= ~DMA_ENABLE;
		writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
	}
	qcom_qspi_pio_xfer(ctrl);

exit:
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (ret)
		return ret;

	/* We'll call spi_finalize_current_transfer() when done */
	return 1;
}

static int qcom_qspi_prepare_message(struct spi_controller *host,
				     struct spi_message *message)
{
	u32 mstr_cfg;
	struct qcom_qspi *ctrl;
	int tx_data_oe_delay = 1;
	int tx_data_delay = 1;
	unsigned long flags;

	ctrl = spi_controller_get_devdata(host);
	spin_lock_irqsave(&ctrl->lock, flags);

	mstr_cfg = readl(ctrl->base + MSTR_CONFIG);
	mstr_cfg &= ~CHIP_SELECT_NUM;
	if (spi_get_chipselect(message->spi, 0))
		mstr_cfg |= CHIP_SELECT_NUM;

	mstr_cfg |= FB_CLK_EN | PIN_WPN | PIN_HOLDN | SBL_EN | FULL_CYCLE_MODE;
	mstr_cfg &= ~(SPI_MODE_MSK | TX_DATA_OE_DELAY_MSK | TX_DATA_DELAY_MSK);
	mstr_cfg |= message->spi->mode << SPI_MODE_SHFT;
	mstr_cfg |= tx_data_oe_delay << TX_DATA_OE_DELAY_SHFT;
	mstr_cfg |= tx_data_delay << TX_DATA_DELAY_SHFT;
	mstr_cfg &= ~DMA_ENABLE;

	writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return 0;
}

static int qcom_qspi_alloc_dma(struct qcom_qspi *ctrl)
{
	ctrl->dma_cmd_pool = dmam_pool_create("qspi cmd desc pool",
		ctrl->dev, sizeof(struct qspi_cmd_desc), 0, 0);
	if (!ctrl->dma_cmd_pool)
		return -ENOMEM;

	return 0;
}
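
/*
 * Drain the RX FIFO into the transfer's rx_buf: whole words first, then
 * any trailing bytes.
 */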
static irqreturn_t pio_read(struct qcom_qspi *ctrl)
{
	u32 rd_fifo_status;
	u32 rd_fifo;
	unsigned int wr_cnts;
	unsigned int bytes_to_read;
	unsigned int words_to_read;
	u32 *word_buf;
	u8 *byte_buf;
	int i;

	rd_fifo_status = readl(ctrl->base + RD_FIFO_STATUS);

	if (!(rd_fifo_status & FIFO_RDY)) {
		dev_dbg(ctrl->dev, "Spurious IRQ %#x\n", rd_fifo_status);
		return IRQ_NONE;
	}

	wr_cnts = (rd_fifo_status & WR_CNTS_MSK) >> WR_CNTS_SHFT;
	wr_cnts = min(wr_cnts, ctrl->xfer.rem_bytes);

	words_to_read = wr_cnts / QSPI_BYTES_PER_WORD;
	bytes_to_read = wr_cnts % QSPI_BYTES_PER_WORD;

	if (words_to_read) {
		word_buf = ctrl->xfer.rx_buf;
		ctrl->xfer.rem_bytes -= words_to_read * QSPI_BYTES_PER_WORD;
		ioread32_rep(ctrl->base + RD_FIFO, word_buf, words_to_read);
		ctrl->xfer.rx_buf = word_buf + words_to_read;
	}

	if (bytes_to_read) {
		byte_buf = ctrl->xfer.rx_buf;
		rd_fifo = readl(ctrl->base + RD_FIFO);
		ctrl->xfer.rem_bytes -= bytes_to_read;
		for (i = 0; i < bytes_to_read; i++)
			*byte_buf++ = rd_fifo >> (i * BITS_PER_BYTE);
		ctrl->xfer.rx_buf = byte_buf;
	}

	return IRQ_HANDLED;
}
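
/*
 * Fill the TX FIFO from the transfer's tx_buf, writing as much as the
 * currently available FIFO space allows.
 */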
static irqreturn_t pio_write(struct qcom_qspi *ctrl)
{
	const void *xfer_buf = ctrl->xfer.tx_buf;
	const int *word_buf;
	const char *byte_buf;
	unsigned int wr_fifo_bytes;
	unsigned int wr_fifo_words;
	unsigned int wr_size;
	unsigned int rem_words;

	wr_fifo_bytes = readl(ctrl->base + PIO_XFER_STATUS);
	wr_fifo_bytes >>= WR_FIFO_BYTES_SHFT;

	if (ctrl->xfer.rem_bytes < QSPI_BYTES_PER_WORD) {
		/* Process the last 1-3 bytes */
		wr_size = min(wr_fifo_bytes, ctrl->xfer.rem_bytes);
		ctrl->xfer.rem_bytes -= wr_size;

		byte_buf = xfer_buf;
		while (wr_size--)
			writel(*byte_buf++,
			       ctrl->base + PIO_DATAOUT_1B);
		ctrl->xfer.tx_buf = byte_buf;
	} else {
		/*
		 * Process all the whole words; to keep things simple we'll
		 * just wait for the next interrupt to handle the last 1-3
		 * bytes if the length isn't a whole number of words.
		 */
		rem_words = ctrl->xfer.rem_bytes / QSPI_BYTES_PER_WORD;
		wr_fifo_words = wr_fifo_bytes / QSPI_BYTES_PER_WORD;

		wr_size = min(rem_words, wr_fifo_words);
		ctrl->xfer.rem_bytes -= wr_size * QSPI_BYTES_PER_WORD;

		word_buf = xfer_buf;
		iowrite32_rep(ctrl->base + PIO_DATAOUT_4B, word_buf, wr_size);
		ctrl->xfer.tx_buf = word_buf + wr_size;

	}

	return IRQ_HANDLED;
}

static irqreturn_t qcom_qspi_irq(int irq, void *dev_id)
{
	u32 int_status;
	struct qcom_qspi *ctrl = dev_id;
	irqreturn_t ret = IRQ_NONE;

	spin_lock(&ctrl->lock);

	int_status = readl(ctrl->base + MSTR_INT_STATUS);
	writel(int_status, ctrl->base + MSTR_INT_STATUS);

	/* Ignore disabled interrupts */
	int_status &= readl(ctrl->base + MSTR_INT_EN);

	/* PIO mode handling */
	if (ctrl->xfer.dir == QSPI_WRITE) {
		if (int_status & WR_FIFO_EMPTY)
			ret = pio_write(ctrl);
	} else {
		if (int_status & RESP_FIFO_RDY)
			ret = pio_read(ctrl);
	}

	if (int_status & QSPI_ERR_IRQS) {
		if (int_status & RESP_FIFO_UNDERRUN)
			dev_err(ctrl->dev, "IRQ error: FIFO underrun\n");
		if (int_status & WR_FIFO_OVERRUN)
			dev_err(ctrl->dev, "IRQ error: FIFO overrun\n");
		if (int_status & HRESP_FROM_NOC_ERR)
			dev_err(ctrl->dev, "IRQ error: NOC response error\n");
		ret = IRQ_HANDLED;
	}

	if (!ctrl->xfer.rem_bytes) {
		writel(0, ctrl->base + MSTR_INT_EN);
		spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
	}

	/* DMA mode handling */
	if (int_status & DMA_CHAIN_DONE) {
		int i;

		writel(0, ctrl->base + MSTR_INT_EN);
		ctrl->xfer.rem_bytes = 0;

		for (i = 0; i < ctrl->n_cmd_desc; i++)
			dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
				      ctrl->dma_cmd_desc[i]);
		ctrl->n_cmd_desc = 0;

		ret = IRQ_HANDLED;
		spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
	}

	spin_unlock(&ctrl->lock);
	return ret;
}

static int qcom_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	/*
	 * If qcom_qspi_can_dma() is going to return false we don't need to
	 * adjust anything.
	 */
	if (op->data.nbytes <= QSPI_MAX_BYTES_FIFO)
		return 0;

	/*
	 * When reading, the transfer needs to be a multiple of 4 bytes so
	 * shrink the transfer if that's not true. The caller will then do a
	 * second transfer to finish things up.
	 */
	if (op->data.dir == SPI_MEM_DATA_IN && (op->data.nbytes & 0x3))
		op->data.nbytes &= ~0x3;

	return 0;
}

static const struct spi_controller_mem_ops qcom_qspi_mem_ops = {
	.adjust_op_size = qcom_qspi_adjust_op_size,
};

static int qcom_qspi_probe(struct platform_device *pdev)
{
	int ret;
	struct device *dev;
	struct spi_controller *host;
	struct qcom_qspi *ctrl;

	dev = &pdev->dev;

	host = devm_spi_alloc_host(dev, sizeof(*ctrl));
	if (!host)
		return -ENOMEM;

	platform_set_drvdata(pdev, host);

	ctrl = spi_controller_get_devdata(host);

	spin_lock_init(&ctrl->lock);
	ctrl->dev = dev;
	ctrl->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ctrl->base))
		return PTR_ERR(ctrl->base);

	ctrl->clks = devm_kcalloc(dev, QSPI_NUM_CLKS,
				  sizeof(*ctrl->clks), GFP_KERNEL);
	if (!ctrl->clks)
		return -ENOMEM;

	ctrl->clks[QSPI_CLK_CORE].id = "core";
	ctrl->clks[QSPI_CLK_IFACE].id = "iface";
	ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks);
	if (ret)
		return ret;

	ctrl->icc_path_cpu_to_qspi = devm_of_icc_get(dev, "qspi-config");
	if (IS_ERR(ctrl->icc_path_cpu_to_qspi))
		return dev_err_probe(dev, PTR_ERR(ctrl->icc_path_cpu_to_qspi),
				     "Failed to get cpu path\n");

	/* Set BW vote for register access */
	ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, Bps_to_icc(1000),
			 Bps_to_icc(1000));
	if (ret) {
		dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
			__func__, ret);
		return ret;
	}

	ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
	if (ret) {
		dev_err(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
			__func__, ret);
		return ret;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	ret = devm_request_irq(dev, ret, qcom_qspi_irq, 0, dev_name(dev), ctrl);
	if (ret) {
		dev_err(dev, "Failed to request irq %d\n", ret);
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return dev_err_probe(dev, ret, "could not set DMA mask\n");

	host->max_speed_hz = 300000000;
	host->max_dma_len = 65536; /* as per HPG */
	host->dma_alignment = QSPI_ALIGN_REQ;
	host->num_chipselect = QSPI_NUM_CS;
	host->bus_num = -1;
	host->dev.of_node = pdev->dev.of_node;
	host->mode_bits = SPI_MODE_0 |
			  SPI_TX_DUAL | SPI_RX_DUAL |
			  SPI_TX_QUAD | SPI_RX_QUAD;
	host->flags = SPI_CONTROLLER_HALF_DUPLEX;
	host->prepare_message = qcom_qspi_prepare_message;
	host->transfer_one = qcom_qspi_transfer_one;
	host->handle_err = qcom_qspi_handle_err;
	if (of_property_read_bool(pdev->dev.of_node, "iommus"))
		host->can_dma = qcom_qspi_can_dma;
	host->auto_runtime_pm = true;
	host->mem_ops = &qcom_qspi_mem_ops;

	ret = devm_pm_opp_set_clkname(&pdev->dev, "core");
	if (ret)
		return ret;
	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(&pdev->dev);
	if (ret && ret != -ENODEV) {
		dev_err(&pdev->dev, "invalid OPP table in device tree\n");
		return ret;
	}

	ret = qcom_qspi_alloc_dma(ctrl);
	if (ret)
		return ret;

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 250);
	pm_runtime_enable(dev);

	ret = spi_register_controller(host);
	if (!ret)
		return 0;

	pm_runtime_disable(dev);

	return ret;
}

static void qcom_qspi_remove(struct platform_device *pdev)
{
	struct spi_controller *host = platform_get_drvdata(pdev);

	/* Unregister _before_ disabling pm_runtime() so we stop transfers */
	spi_unregister_controller(host);

	pm_runtime_disable(&pdev->dev);
}

static int __maybe_unused qcom_qspi_runtime_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct qcom_qspi *ctrl = spi_controller_get_devdata(host);
	int ret;

	/* Drop the performance state vote */
	dev_pm_opp_set_rate(dev, 0);
	clk_bulk_disable_unprepare(QSPI_NUM_CLKS, ctrl->clks);

	ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
	if (ret) {
		dev_err_ratelimited(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
				    __func__, ret);
		return ret;
	}

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int __maybe_unused qcom_qspi_runtime_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct qcom_qspi *ctrl = spi_controller_get_devdata(host);
	int ret;

	pinctrl_pm_select_default_state(dev);

	ret = icc_enable(ctrl->icc_path_cpu_to_qspi);
	if (ret) {
		dev_err_ratelimited(ctrl->dev, "%s: ICC enable failed for cpu: %d\n",
				    __func__, ret);
		return ret;
	}

	ret = clk_bulk_prepare_enable(QSPI_NUM_CLKS, ctrl->clks);
	if (ret)
		return ret;

	return dev_pm_opp_set_rate(dev, ctrl->last_speed * 4);
}

static int __maybe_unused qcom_qspi_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	int ret;

	ret = spi_controller_suspend(host);
	if (ret)
		return ret;

	ret = pm_runtime_force_suspend(dev);
	if (ret)
		spi_controller_resume(host);

	return ret;
}

static int __maybe_unused qcom_qspi_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret)
		return ret;

	ret = spi_controller_resume(host);
	if (ret)
		pm_runtime_force_suspend(dev);

	return ret;
}

static const struct dev_pm_ops qcom_qspi_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(qcom_qspi_runtime_suspend,
			   qcom_qspi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(qcom_qspi_suspend, qcom_qspi_resume)
};

static const struct of_device_id qcom_qspi_dt_match[] = {
	{ .compatible = "qcom,qspi-v1", },
	{ }
};
MODULE_DEVICE_TABLE(of, qcom_qspi_dt_match);

static struct platform_driver qcom_qspi_driver = {
	.driver = {
		.name		= "qcom_qspi",
		.pm		= &qcom_qspi_dev_pm_ops,
		.of_match_table = qcom_qspi_dt_match,
	},
	.probe = qcom_qspi_probe,
	.remove_new = qcom_qspi_remove,
};
module_platform_driver(qcom_qspi_driver);

MODULE_DESCRIPTION("SPI driver for QSPI cores");
MODULE_LICENSE("GPL v2");