/*
 * PXA2xx SPI DMA engine support.
 *
 * Copyright (C) 2013, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pxa2xx_ssp.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>

#include "spi-pxa2xx.h"

static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
					     bool error)
{
	struct spi_message *msg = drv_data->controller->cur_msg;

	/*
	 * It is possible that one CPU is handling the ROR interrupt and
	 * another just gets the DMA completion. Calling pump_transfers()
	 * twice for the same transfer leads to problems, thus we prevent
	 * concurrent calls by using ->dma_running.
	 */
	if (atomic_dec_and_test(&drv_data->dma_running)) {
		/*
		 * If the other CPU is still handling the ROR interrupt we
		 * might not know about the error yet. So we re-check the
		 * ROR bit here before we clear the status register.
		 */
		if (!error) {
			u32 status = pxa2xx_spi_read(drv_data, SSSR)
				     & drv_data->mask_sr;
			error = status & SSSR_ROR;
		}

		/* Clear status & disable interrupts */
		pxa2xx_spi_write(drv_data, SSCR1,
				 pxa2xx_spi_read(drv_data, SSCR1)
				 & ~drv_data->dma_cr1);
		write_SSSR_CS(drv_data, drv_data->clear_sr);
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, 0);

		if (error) {
			/* In case we got an error we disable the SSP now */
			pxa2xx_spi_write(drv_data, SSCR0,
					 pxa2xx_spi_read(drv_data, SSCR0)
					 & ~SSCR0_SSE);
			msg->status = -EIO;
		}

		spi_finalize_current_transfer(drv_data->controller);
	}
}

static void pxa2xx_spi_dma_callback(void *data)
{
	pxa2xx_spi_dma_transfer_complete(data, false);
}

static struct dma_async_tx_descriptor *
pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
			   enum dma_transfer_direction dir,
			   struct spi_transfer *xfer)
{
	struct chip_data *chip =
		spi_get_ctldata(drv_data->controller->cur_msg->spi);
	enum dma_slave_buswidth width;
	struct dma_slave_config cfg;
	struct dma_chan *chan;
	struct sg_table *sgt;
	int ret;

	switch (drv_data->n_bytes) {
	case 1:
		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	case 2:
		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	default:
		width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = dir;

	if (dir == DMA_MEM_TO_DEV) {
		cfg.dst_addr = drv_data->ssdr_physical;
		cfg.dst_addr_width = width;
		cfg.dst_maxburst = chip->dma_burst_size;

		sgt = &xfer->tx_sg;
		chan = drv_data->controller->dma_tx;
	} else {
		cfg.src_addr = drv_data->ssdr_physical;
		cfg.src_addr_width = width;
		cfg.src_maxburst = chip->dma_burst_size;

		sgt = &xfer->rx_sg;
		chan = drv_data->controller->dma_rx;
	}

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret) {
		dev_warn(&drv_data->pdev->dev, "DMA slave config failed\n");
		return NULL;
	}

	return dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
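/*
 * Illustrative sketch, not part of the original driver: the generic
 * dmaengine slave sequence (config -> prep -> submit -> issue pending)
 * that pxa2xx_spi_dma_prepare_one() is an instance of, shown here for a
 * single contiguous DMA-mapped buffer instead of a scatterlist. The
 * helper name and the buf/len/fifo parameters are hypothetical
 * placeholders; only documented dmaengine APIs are used.
 */
static int __maybe_unused pxa2xx_spi_dma_sketch_single(struct dma_chan *chan,
						       dma_addr_t buf,
						       size_t len,
						       dma_addr_t fifo)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config cfg;
	dma_cookie_t cookie;

	/* Point the channel at the device FIFO, one byte per beat */
	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = fifo;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	cfg.dst_maxburst = 1;
	if (dmaengine_slave_config(chan, &cfg))
		return -EINVAL;

	/* Single-buffer variant of dmaengine_prep_slave_sg() */
	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	/* Queue the descriptor, then kick the engine */
	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;
	dma_async_issue_pending(chan);

	return 0;
}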
irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
	u32 status;

	status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr;
	if (status & SSSR_ROR) {
		dev_err(&drv_data->pdev->dev, "FIFO overrun\n");

		dmaengine_terminate_async(drv_data->controller->dma_rx);
		dmaengine_terminate_async(drv_data->controller->dma_tx);

		pxa2xx_spi_dma_transfer_complete(drv_data, true);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

int pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
			   struct spi_transfer *xfer)
{
	struct dma_async_tx_descriptor *tx_desc, *rx_desc;
	int err;

	tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV, xfer);
	if (!tx_desc) {
		dev_err(&drv_data->pdev->dev,
			"failed to get DMA TX descriptor\n");
		err = -EBUSY;
		goto err_tx;
	}

	rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM, xfer);
	if (!rx_desc) {
		dev_err(&drv_data->pdev->dev,
			"failed to get DMA RX descriptor\n");
		err = -EBUSY;
		goto err_rx;
	}

	/* We are ready when RX completes */
	rx_desc->callback = pxa2xx_spi_dma_callback;
	rx_desc->callback_param = drv_data;

	dmaengine_submit(rx_desc);
	dmaengine_submit(tx_desc);
	return 0;

err_rx:
	dmaengine_terminate_async(drv_data->controller->dma_tx);
err_tx:
	return err;
}

void pxa2xx_spi_dma_start(struct driver_data *drv_data)
{
	dma_async_issue_pending(drv_data->controller->dma_rx);
	dma_async_issue_pending(drv_data->controller->dma_tx);

	atomic_set(&drv_data->dma_running, 1);
}

void pxa2xx_spi_dma_stop(struct driver_data *drv_data)
{
	atomic_set(&drv_data->dma_running, 0);
	dmaengine_terminate_sync(drv_data->controller->dma_rx);
	dmaengine_terminate_sync(drv_data->controller->dma_tx);
}

int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
	struct pxa2xx_spi_controller *pdata = drv_data->controller_info;
	struct device *dev = &drv_data->pdev->dev;
	struct spi_controller *controller = drv_data->controller;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	controller->dma_tx = dma_request_slave_channel_compat(mask,
				pdata->dma_filter, pdata->tx_param, dev, "tx");
	if (!controller->dma_tx)
		return -ENODEV;

	controller->dma_rx = dma_request_slave_channel_compat(mask,
				pdata->dma_filter, pdata->rx_param, dev, "rx");
	if (!controller->dma_rx) {
		dma_release_channel(controller->dma_tx);
		controller->dma_tx = NULL;
		return -ENODEV;
	}

	return 0;
}

void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
	struct spi_controller *controller = drv_data->controller;

	if (controller->dma_rx) {
		dmaengine_terminate_sync(controller->dma_rx);
		dma_release_channel(controller->dma_rx);
		controller->dma_rx = NULL;
	}
	if (controller->dma_tx) {
		dmaengine_terminate_sync(controller->dma_tx);
		dma_release_channel(controller->dma_tx);
		controller->dma_tx = NULL;
	}
}
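/*
 * Illustrative sketch, not part of the original driver: how a probe-style
 * caller would pair pxa2xx_spi_dma_setup() with pxa2xx_spi_dma_release().
 * The helper name is hypothetical, and the comment stands in for the real
 * controller registration and transfer work that would happen in between;
 * setup failures simply propagate so the caller can fall back to PIO.
 */
static int __maybe_unused pxa2xx_spi_dma_sketch_probe(struct driver_data *drv_data)
{
	int ret;

	/* Request both channels; on failure no channel is left held */
	ret = pxa2xx_spi_dma_setup(drv_data);
	if (ret)
		return ret;

	/*
	 * ... register the controller and run transfers here ...
	 *
	 * On teardown (or on a later probe error), release both channels;
	 * pxa2xx_spi_dma_release() also terminates any in-flight DMA.
	 */
	pxa2xx_spi_dma_release(drv_data);

	return 0;
}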
int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
					   struct spi_device *spi,
					   u8 bits_per_word, u32 *burst_code,
					   u32 *threshold)
{
	struct pxa2xx_spi_chip *chip_info = spi->controller_data;
	struct driver_data *drv_data = spi_controller_get_devdata(spi->controller);
	u32 dma_burst_size = drv_data->controller_info->dma_burst_size;

	/*
	 * If the DMA burst size is given in chip_info, we use that;
	 * otherwise we use the default. Also we use the default FIFO
	 * thresholds for now.
	 */
	*burst_code = chip_info ? chip_info->dma_burst_size : dma_burst_size;
	*threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
		   | SSCR1_TxTresh(TX_THRESH_DFLT);

	return 0;
}