// SPDX-License-Identifier: GPL-2.0
//
// Socionext UniPhier AIO DMA driver.
//
// Copyright (c) 2016-2018 Socionext Inc.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>

#include "aio.h"

static struct snd_pcm_hardware uniphier_aiodma_hw = {
	.info = SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_INTERLEAVED,
	.period_bytes_min = 256,
	.period_bytes_max = 4096,
	.periods_min      = 4,
	.periods_max      = 1024,
	.buffer_bytes_max = 128 * 1024,
};

static void aiodma_pcm_irq(struct uniphier_aio_sub *sub)
{
	struct snd_pcm_runtime *runtime = sub->substream->runtime;
	int bytes = runtime->period_size *
		runtime->channels * samples_to_bytes(runtime, 1);
	int ret;

	spin_lock(&sub->lock);
	ret = aiodma_rb_set_threshold(sub, runtime->dma_bytes,
				      sub->threshold + bytes);
	if (!ret)
		sub->threshold += bytes;

	aiodma_rb_sync(sub, runtime->dma_addr, runtime->dma_bytes, bytes);
	aiodma_rb_clear_irq(sub);
	spin_unlock(&sub->lock);

	snd_pcm_period_elapsed(sub->substream);
}

static void aiodma_compr_irq(struct uniphier_aio_sub *sub)
{
	struct snd_compr_runtime *runtime = sub->cstream->runtime;
	int bytes = runtime->fragment_size;
	int ret;

	spin_lock(&sub->lock);
	ret = aiodma_rb_set_threshold(sub, sub->compr_bytes,
				      sub->threshold + bytes);
	if (!ret)
		sub->threshold += bytes;

	aiodma_rb_sync(sub, sub->compr_addr, sub->compr_bytes, bytes);
	aiodma_rb_clear_irq(sub);
	spin_unlock(&sub->lock);

	snd_compr_fragment_elapsed(sub->cstream);
}

static irqreturn_t aiodma_irq(int irq, void *p)
{
	struct platform_device *pdev = p;
	struct uniphier_aio_chip *chip = platform_get_drvdata(pdev);
	irqreturn_t ret = IRQ_NONE;
	int i, j;

	for (i = 0; i < chip->num_aios; i++) {
		struct uniphier_aio *aio = &chip->aios[i];

		for (j = 0; j < ARRAY_SIZE(aio->sub); j++) {
			struct uniphier_aio_sub *sub = &aio->sub[j];

			/* Skip channel that does not trigger */
			if (!sub->running || !aiodma_rb_is_irq(sub))
				continue;

			if (sub->substream)
				aiodma_pcm_irq(sub);
			if (sub->cstream)
				aiodma_compr_irq(sub);

			ret = IRQ_HANDLED;
		}
	}

	return ret;
}

static int uniphier_aiodma_open(struct snd_soc_component *component,
				struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	snd_soc_set_runtime_hwparams(substream, &uniphier_aiodma_hw);

	return snd_pcm_hw_constraint_step(runtime, 0,
					  SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 256);
}

static int uniphier_aiodma_hw_params(struct snd_soc_component *component,
				     struct snd_pcm_substream *substream,
				     struct snd_pcm_hw_params *params)
{
	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
	substream->runtime->dma_bytes = params_buffer_bytes(params);

	return 0;
}

static int uniphier_aiodma_hw_free(struct snd_soc_component *component,
				   struct snd_pcm_substream *substream)
{
	snd_pcm_set_runtime_buffer(substream, NULL);
	substream->runtime->dma_bytes = 0;

	return 0;
}

static int uniphier_aiodma_prepare(struct snd_soc_component *component,
				   struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
	struct uniphier_aio *aio = uniphier_priv(rtd->cpu_dai);
	struct uniphier_aio_sub *sub = &aio->sub[substream->stream];
	int bytes = runtime->period_size *
		runtime->channels * samples_to_bytes(runtime, 1);
	unsigned long flags;
	int ret;

	ret = aiodma_ch_set_param(sub);
	if (ret)
		return ret;

	spin_lock_irqsave(&sub->lock, flags);
	ret = aiodma_rb_set_buffer(sub, runtime->dma_addr,
				   runtime->dma_addr + runtime->dma_bytes,
				   bytes);
	spin_unlock_irqrestore(&sub->lock, flags);
	if (ret)
		return ret;

	return 0;
}

static int uniphier_aiodma_trigger(struct snd_soc_component *component,
				   struct snd_pcm_substream *substream, int cmd)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
	struct uniphier_aio *aio = uniphier_priv(rtd->cpu_dai);
	struct uniphier_aio_sub *sub = &aio->sub[substream->stream];
	struct device *dev = &aio->chip->pdev->dev;
	int bytes = runtime->period_size *
		runtime->channels * samples_to_bytes(runtime, 1);
	unsigned long flags;

	spin_lock_irqsave(&sub->lock, flags);
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		aiodma_rb_sync(sub, runtime->dma_addr, runtime->dma_bytes,
			       bytes);
		aiodma_ch_set_enable(sub, 1);
		sub->running = 1;

		break;
	case SNDRV_PCM_TRIGGER_STOP:
		sub->running = 0;
		aiodma_ch_set_enable(sub, 0);

		break;
	default:
		dev_warn(dev, "Unknown trigger(%d) ignored\n", cmd);
		break;
	}
	spin_unlock_irqrestore(&sub->lock, flags);

	return 0;
}

static snd_pcm_uframes_t uniphier_aiodma_pointer(
					struct snd_soc_component *component,
					struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
	struct uniphier_aio *aio = uniphier_priv(rtd->cpu_dai);
	struct uniphier_aio_sub *sub = &aio->sub[substream->stream];
	int bytes = runtime->period_size *
		runtime->channels * samples_to_bytes(runtime, 1);
	unsigned long flags;
	snd_pcm_uframes_t pos;

	spin_lock_irqsave(&sub->lock, flags);
	aiodma_rb_sync(sub, runtime->dma_addr, runtime->dma_bytes, bytes);

	if (sub->swm->dir == PORT_DIR_OUTPUT)
		pos = bytes_to_frames(runtime, sub->rd_offs);
	else
		pos = bytes_to_frames(runtime, sub->wr_offs);
	spin_unlock_irqrestore(&sub->lock, flags);

	return pos;
}

static int uniphier_aiodma_mmap(struct snd_soc_component *component,
				struct snd_pcm_substream *substream,
				struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start,
			       substream->dma_buffer.addr >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}

static int uniphier_aiodma_new(struct snd_soc_component *component,
			       struct snd_soc_pcm_runtime *rtd)
{
	struct device *dev = rtd->card->snd_card->dev;
	struct snd_pcm *pcm = rtd->pcm;
	int ret;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(33));
	if (ret)
		return ret;

	snd_pcm_lib_preallocate_pages_for_all(pcm,
		SNDRV_DMA_TYPE_DEV, dev,
		uniphier_aiodma_hw.buffer_bytes_max,
		uniphier_aiodma_hw.buffer_bytes_max);
	return 0;
}

static void uniphier_aiodma_free(struct snd_soc_component *component,
				 struct snd_pcm *pcm)
{
	snd_pcm_lib_preallocate_free_for_all(pcm);
}

static const struct snd_soc_component_driver uniphier_soc_platform = {
	.open		= uniphier_aiodma_open,
	.ioctl		= snd_soc_pcm_lib_ioctl,
	.hw_params	= uniphier_aiodma_hw_params,
	.hw_free	= uniphier_aiodma_hw_free,
	.prepare	= uniphier_aiodma_prepare,
	.trigger	= uniphier_aiodma_trigger,
	.pointer	= uniphier_aiodma_pointer,
	.mmap		= uniphier_aiodma_mmap,
	.pcm_construct	= uniphier_aiodma_new,
	.pcm_destruct	= uniphier_aiodma_free,
	.compr_ops	= &uniphier_aio_compr_ops,
};

static const struct regmap_config aiodma_regmap_config = {
	.reg_bits	= 32,
	.reg_stride	= 4,
	.val_bits	= 32,
	.max_register	= 0x7fffc,
	.cache_type	= REGCACHE_NONE,
};

/**
 * uniphier_aiodma_soc_register_platform - register the AIO DMA
 * @pdev: the platform device
 *
 * Register and set up the AIO DMA that transfers sound data to the device.
 * This function needs to be called once at driver startup, and no
 * corresponding unregister call is required (see the illustrative caller
 * sketch at the end of this file).
 *
 * Return: Zero if successful, otherwise a negative value on error.
 */
int uniphier_aiodma_soc_register_platform(struct platform_device *pdev)
{
	struct uniphier_aio_chip *chip = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	void __iomem *preg;
	int irq, ret;

	preg = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(preg))
		return PTR_ERR(preg);

	chip->regmap = devm_regmap_init_mmio(dev, preg,
					     &aiodma_regmap_config);
	if (IS_ERR(chip->regmap))
		return PTR_ERR(chip->regmap);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, aiodma_irq,
			       IRQF_SHARED, dev_name(dev), pdev);
	if (ret)
		return ret;

	return devm_snd_soc_register_component(dev, &uniphier_soc_platform,
					       NULL, 0);
}
EXPORT_SYMBOL_GPL(uniphier_aiodma_soc_register_platform);
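
/*
 * Illustrative caller sketch (not compiled): the chip driver is expected to
 * set its driver data on the platform device and then call
 * uniphier_aiodma_soc_register_platform() once from its probe routine.
 * The probe function name below is hypothetical, and a real probe would also
 * fill in chip->aios / chip->num_aios before any DMA interrupt can fire;
 * this is only a minimal sketch of such a caller, not the actual chip driver.
 */
#if 0
static int example_aio_probe(struct platform_device *pdev)
{
	struct uniphier_aio_chip *chip;
	int ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->pdev = pdev;
	platform_set_drvdata(pdev, chip);

	/* Maps the DMA registers, requests the IRQ and registers the
	 * ASoC component; all resources are devm-managed.
	 */
	ret = uniphier_aiodma_soc_register_platform(pdev);
	if (ret)
		return ret;

	return 0;
}
#endif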