// SPDX-License-Identifier: GPL-2.0+
// imx-pcm-fiq.c -- ALSA SoC Audio Layer
//
// Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de>
//
// This code is based on code copyrighted by Freescale,
// Liam Girdwood, Javier Martin and probably others.

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <sound/core.h>
#include <sound/dmaengine_pcm.h>
#include <sound/initval.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>

#include <asm/fiq.h>

#include <linux/platform_data/asoc-imx-ssi.h>

#include "imx-ssi.h"
#include "imx-pcm.h"

/*
 * Per-substream bookkeeping.  The SSI FIFOs are serviced entirely by the
 * FIQ handler installed in imx_pcm_fiq_new(); this driver only polls the
 * FIQ-banked registers from an hrtimer to report progress to ALSA.
 */
struct imx_pcm_runtime_data {
	unsigned int period;
	int periods;
	unsigned long offset;
	struct hrtimer hrt;
	int poll_time_ns;
	struct snd_pcm_substream *substream;
	atomic_t playing;
	atomic_t capturing;
};

/*
 * Timer callback: the FIQ handler keeps the current buffer offset in the
 * lower 16 bits of r8 (playback) and r9 (capture).  Poll it once per
 * period and notify ALSA.
 */
static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
{
	struct imx_pcm_runtime_data *iprtd =
		container_of(hrt, struct imx_pcm_runtime_data, hrt);
	struct snd_pcm_substream *substream = iprtd->substream;
	struct pt_regs regs;

	if (!atomic_read(&iprtd->playing) && !atomic_read(&iprtd->capturing))
		return HRTIMER_NORESTART;

	get_fiq_regs(&regs);

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		iprtd->offset = regs.ARM_r8 & 0xffff;
	else
		iprtd->offset = regs.ARM_r9 & 0xffff;

	snd_pcm_period_elapsed(substream);

	hrtimer_forward_now(hrt, ns_to_ktime(iprtd->poll_time_ns));

	return HRTIMER_RESTART;
}

static struct fiq_handler fh = {
	.name = DRV_NAME,
};

static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd = runtime->private_data;

	iprtd->periods = params_periods(params);
	iprtd->period = params_period_bytes(params);
	iprtd->offset = 0;
	/* poll once per period */
	iprtd->poll_time_ns = 1000000000 / params_rate(params) *
				params_period_size(params);
	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);

	return 0;
}

static int snd_imx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd = runtime->private_data;
	struct pt_regs regs;

	/*
	 * Tell the FIQ handler the buffer size: the upper 16 bits of
	 * r8/r9 hold the last valid byte offset, while the lower 16 bits
	 * are the running position that the FIQ handler advances.
	 */
	get_fiq_regs(&regs);
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		regs.ARM_r8 = (iprtd->period * iprtd->periods - 1) << 16;
	else
		regs.ARM_r9 = (iprtd->period * iprtd->periods - 1) << 16;

	set_fiq_regs(&regs);

	return 0;
}

static int imx_pcm_fiq;

static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd = runtime->private_data;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			atomic_set(&iprtd->playing, 1);
		else
			atomic_set(&iprtd->capturing, 1);
		hrtimer_start(&iprtd->hrt, ns_to_ktime(iprtd->poll_time_ns),
			      HRTIMER_MODE_REL);
		enable_fiq(imx_pcm_fiq);
		break;

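	/*
	 * Stop paths: mark this direction idle and only disable the FIQ
	 * once both playback and capture have stopped, since the same
	 * FIQ services the SSI in both directions.
	 */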
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			atomic_set(&iprtd->playing, 0);
		else
			atomic_set(&iprtd->capturing, 0);
		if (!atomic_read(&iprtd->playing) &&
		    !atomic_read(&iprtd->capturing))
			disable_fiq(imx_pcm_fiq);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static snd_pcm_uframes_t snd_imx_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd = runtime->private_data;

	return bytes_to_frames(substream->runtime, iprtd->offset);
}

static const struct snd_pcm_hardware snd_imx_hardware = {
	.info = SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_BLOCK_TRANSFER |
		SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_RESUME,
	.formats = SNDRV_PCM_FMTBIT_S16_LE,
	.buffer_bytes_max = IMX_SSI_DMABUF_SIZE,
	.period_bytes_min = 128,
	.period_bytes_max = 16 * 1024,
	.periods_min = 4,
	.periods_max = 255,
	.fifo_size = 0,
};

static int snd_imx_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd;
	int ret;

	iprtd = kzalloc(sizeof(*iprtd), GFP_KERNEL);
	if (iprtd == NULL)
		return -ENOMEM;
	runtime->private_data = iprtd;

	iprtd->substream = substream;

	atomic_set(&iprtd->playing, 0);
	atomic_set(&iprtd->capturing, 0);
	hrtimer_init(&iprtd->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	iprtd->hrt.function = snd_hrtimer_callback;

	ret = snd_pcm_hw_constraint_integer(substream->runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0) {
		kfree(iprtd);
		return ret;
	}

	snd_soc_set_runtime_hwparams(substream, &snd_imx_hardware);
	return 0;
}

static int snd_imx_close(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd = runtime->private_data;

	hrtimer_cancel(&iprtd->hrt);

	kfree(iprtd);

	return 0;
}

static int snd_imx_pcm_mmap(struct snd_pcm_substream *substream,
			    struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int ret;

	ret = dma_mmap_wc(substream->pcm->card->dev, vma, runtime->dma_area,
			  runtime->dma_addr, runtime->dma_bytes);

	pr_debug("%s: ret: %d %p %pad 0x%08zx\n", __func__, ret,
		 runtime->dma_area,
		 &runtime->dma_addr,
		 runtime->dma_bytes);
	return ret;
}

static const struct snd_pcm_ops imx_pcm_ops = {
	.open = snd_imx_open,
	.close = snd_imx_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = snd_imx_pcm_hw_params,
	.prepare = snd_imx_pcm_prepare,
	.trigger = snd_imx_pcm_trigger,
	.pointer = snd_imx_pcm_pointer,
	.mmap = snd_imx_pcm_mmap,
};

static int imx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
{
	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
	struct snd_dma_buffer *buf = &substream->dma_buffer;
	size_t size = IMX_SSI_DMABUF_SIZE;

	buf->dev.type = SNDRV_DMA_TYPE_DEV;
	buf->dev.dev = pcm->card->dev;
	buf->private_data = NULL;
	buf->area = dma_alloc_wc(pcm->card->dev, size, &buf->addr, GFP_KERNEL);
	if (!buf->area)
		return -ENOMEM;
	buf->bytes = size;

	return 0;
}

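/*
 * Set the card's DMA mask and preallocate a writecombine buffer for each
 * substream; imx_pcm_fiq_new() below passes the buffer addresses on to
 * the FIQ handler.
 */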
static int imx_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_pcm *pcm = rtd->pcm;
	int ret;

	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
		ret = imx_pcm_preallocate_dma_buffer(pcm,
			SNDRV_PCM_STREAM_PLAYBACK);
		if (ret)
			return ret;
	}

	if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
		ret = imx_pcm_preallocate_dma_buffer(pcm,
			SNDRV_PCM_STREAM_CAPTURE);
		if (ret)
			return ret;
	}

	return 0;
}

static int ssi_irq;

/*
 * Hand the preallocated buffer addresses to the FIQ code and install the
 * FIQ handler itself.
 */
static int imx_pcm_fiq_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_pcm *pcm = rtd->pcm;
	struct snd_pcm_substream *substream;
	int ret;

	ret = imx_pcm_new(rtd);
	if (ret)
		return ret;

	substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
	if (substream) {
		struct snd_dma_buffer *buf = &substream->dma_buffer;

		imx_ssi_fiq_tx_buffer = (unsigned long)buf->area;
	}

	substream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
	if (substream) {
		struct snd_dma_buffer *buf = &substream->dma_buffer;

		imx_ssi_fiq_rx_buffer = (unsigned long)buf->area;
	}

	set_fiq_handler(&imx_ssi_fiq_start,
			&imx_ssi_fiq_end - &imx_ssi_fiq_start);

	return 0;
}

static void imx_pcm_free(struct snd_pcm *pcm)
{
	struct snd_pcm_substream *substream;
	struct snd_dma_buffer *buf;
	int stream;

	for (stream = 0; stream < 2; stream++) {
		substream = pcm->streams[stream].substream;
		if (!substream)
			continue;

		buf = &substream->dma_buffer;
		if (!buf->area)
			continue;

		dma_free_wc(pcm->card->dev, buf->bytes, buf->area, buf->addr);
		buf->area = NULL;
	}
}

static void imx_pcm_fiq_free(struct snd_pcm *pcm)
{
	mxc_set_irq_fiq(ssi_irq, 0);
	release_fiq(&fh);
	imx_pcm_free(pcm);
}

static const struct snd_soc_component_driver imx_soc_component_fiq = {
	.ops = &imx_pcm_ops,
	.pcm_new = imx_pcm_fiq_new,
	.pcm_free = imx_pcm_fiq_free,
};

/*
 * Claim the FIQ, route the SSI interrupt to the FIQ and register the PCM
 * component.
 */
int imx_pcm_fiq_init(struct platform_device *pdev,
		struct imx_pcm_fiq_params *params)
{
	int ret;

	ret = claim_fiq(&fh);
	if (ret) {
		dev_err(&pdev->dev, "failed to claim fiq: %d", ret);
		return ret;
	}

	mxc_set_irq_fiq(params->irq, 1);
	ssi_irq = params->irq;

	imx_pcm_fiq = params->irq;

	imx_ssi_fiq_base = (unsigned long)params->base;

	params->dma_params_tx->maxburst = 4;
	params->dma_params_rx->maxburst = 6;

	ret = devm_snd_soc_register_component(&pdev->dev, &imx_soc_component_fiq,
					      NULL, 0);
	if (ret)
		goto failed_register;

	return 0;

failed_register:
	mxc_set_irq_fiq(ssi_irq, 0);
	release_fiq(&fh);

	return ret;
}
EXPORT_SYMBOL_GPL(imx_pcm_fiq_init);

void imx_pcm_fiq_exit(struct platform_device *pdev)
{
}
EXPORT_SYMBOL_GPL(imx_pcm_fiq_exit);

MODULE_LICENSE("GPL");