// SPDX-License-Identifier: (GPL-2.0 OR MIT)
//
// Copyright (c) 2018 BayLibre, SAS.
// Author: Jerome Brunet <jbrunet@baylibre.com>

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dai.h>

#include "axg-fifo.h"

/*
 * This file implements the platform operations common to the playback and
 * capture frontend DAIs. The logic behind these two types of fifo is very
 * similar, but some differences exist.
 * These differences are handled in the respective DAI drivers.
 */

static struct snd_pcm_hardware axg_fifo_hw = {
	.info = (SNDRV_PCM_INFO_INTERLEAVED |
		 SNDRV_PCM_INFO_MMAP |
		 SNDRV_PCM_INFO_MMAP_VALID |
		 SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_PAUSE |
		 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
	.formats = AXG_FIFO_FORMATS,
	.rate_min = 5512,
	.rate_max = 192000,
	.channels_min = 1,
	.channels_max = AXG_FIFO_CH_MAX,
	.period_bytes_min = AXG_FIFO_BURST,
	.period_bytes_max = UINT_MAX,
	.periods_min = 2,
	.periods_max = UINT_MAX,

	/* No real justification for this */
	.buffer_bytes_max = 1 * 1024 * 1024,
};

static struct snd_soc_dai *axg_fifo_dai(struct snd_pcm_substream *ss)
{
	struct snd_soc_pcm_runtime *rtd = ss->private_data;

	return asoc_rtd_to_cpu(rtd, 0);
}

static struct axg_fifo *axg_fifo_data(struct snd_pcm_substream *ss)
{
	struct snd_soc_dai *dai = axg_fifo_dai(ss);

	return snd_soc_dai_get_drvdata(dai);
}

static struct device *axg_fifo_dev(struct snd_pcm_substream *ss)
{
	struct snd_soc_dai *dai = axg_fifo_dai(ss);

	return dai->dev;
}

static void __dma_enable(struct axg_fifo *fifo, bool enable)
{
	regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_DMA_EN,
			   enable ? CTRL0_DMA_EN : 0);
}
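/*
 * Rough per-stream call order for the exported helpers below, following
 * the usual ALSA sequence (orientation only, not an exhaustive list):
 *
 *	axg_fifo_pcm_open()
 *	axg_fifo_pcm_hw_params()	(or g12a_fifo_pcm_hw_params())
 *	axg_fifo_pcm_trigger(START)
 *	axg_fifo_pcm_pointer()		(repeatedly, while the stream runs)
 *	axg_fifo_pcm_trigger(STOP)
 *	axg_fifo_pcm_hw_free()
 *	axg_fifo_pcm_close()
 */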
int axg_fifo_pcm_trigger(struct snd_soc_component *component,
			 struct snd_pcm_substream *ss, int cmd)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		__dma_enable(fifo, true);
		break;
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_STOP:
		__dma_enable(fifo, false);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_trigger);

snd_pcm_uframes_t axg_fifo_pcm_pointer(struct snd_soc_component *component,
				       struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct snd_pcm_runtime *runtime = ss->runtime;
	unsigned int addr;

	regmap_read(fifo->map, FIFO_STATUS2, &addr);

	return bytes_to_frames(runtime, addr - (unsigned int)runtime->dma_addr);
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_pointer);

int axg_fifo_pcm_hw_params(struct snd_soc_component *component,
			   struct snd_pcm_substream *ss,
			   struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = ss->runtime;
	struct axg_fifo *fifo = axg_fifo_data(ss);
	unsigned int burst_num, period, threshold, irq_en;
	dma_addr_t end_ptr;

	period = params_period_bytes(params);

	/* Set up the dma memory pointers */
	end_ptr = runtime->dma_addr + runtime->dma_bytes - AXG_FIFO_BURST;
	regmap_write(fifo->map, FIFO_START_ADDR, runtime->dma_addr);
	regmap_write(fifo->map, FIFO_FINISH_ADDR, end_ptr);

	/* Set up the interrupt periodicity */
	burst_num = period / AXG_FIFO_BURST;
	regmap_write(fifo->map, FIFO_INT_ADDR, burst_num);

	/*
	 * Start the fifo request on the smaller of the following:
	 * - Half the fifo size
	 * - Half the period size
	 */
	threshold = min(period / 2, fifo->depth / 2);

	/*
	 * With the threshold in bytes, the register value is:
	 * V = (threshold / burst) - 1
	 */
	threshold /= AXG_FIFO_BURST;
	regmap_field_write(fifo->field_threshold,
			   threshold ? threshold - 1 : 0);

	/* Enable irq if necessary */
	irq_en = runtime->no_period_wakeup ? 0 : FIFO_INT_COUNT_REPEAT;
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN,
			   FIELD_PREP(CTRL0_INT_EN, irq_en));

	return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_hw_params);
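/*
 * Worked example for the threshold math above, assuming the usual 8 byte
 * AXG_FIFO_BURST and a hypothetical 512 byte deep fifo: with a 4096 byte
 * period, threshold = min(4096 / 2, 512 / 2) = 256 bytes, i.e.
 * 256 / 8 = 32 bursts, so 31 ends up in the threshold field.
 * FIFO_INT_ADDR gets 4096 / 8 = 512, the number of bursts per period.
 */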
int g12a_fifo_pcm_hw_params(struct snd_soc_component *component,
			    struct snd_pcm_substream *ss,
			    struct snd_pcm_hw_params *params)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct snd_pcm_runtime *runtime = ss->runtime;
	int ret;

	ret = axg_fifo_pcm_hw_params(component, ss, params);
	if (ret)
		return ret;

	/* Set the initial memory address of the DMA */
	regmap_write(fifo->map, FIFO_INIT_ADDR, runtime->dma_addr);

	return 0;
}
EXPORT_SYMBOL_GPL(g12a_fifo_pcm_hw_params);

int axg_fifo_pcm_hw_free(struct snd_soc_component *component,
			 struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);

	/* Disable irqs */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_hw_free);

static void axg_fifo_ack_irq(struct axg_fifo *fifo, u8 mask)
{
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_INT_CLR,
			   FIELD_PREP(CTRL1_INT_CLR, mask));

	/* The clear bits must also be cleared (written back to 0) */
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_INT_CLR,
			   FIELD_PREP(CTRL1_INT_CLR, 0));
}

static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id)
{
	struct snd_pcm_substream *ss = dev_id;
	struct axg_fifo *fifo = axg_fifo_data(ss);
	unsigned int status;

	regmap_read(fifo->map, FIFO_STATUS1, &status);
	status = FIELD_GET(STATUS1_INT_STS, status);
	axg_fifo_ack_irq(fifo, status);

	/* Use the thread to call period elapsed on nonatomic links */
	if (status & FIFO_INT_COUNT_REPEAT)
		return IRQ_WAKE_THREAD;

	dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n",
		status);

	return IRQ_NONE;
}

static irqreturn_t axg_fifo_pcm_irq_block_thread(int irq, void *dev_id)
{
	struct snd_pcm_substream *ss = dev_id;

	snd_pcm_period_elapsed(ss);

	return IRQ_HANDLED;
}
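/*
 * Side note on the handler split above: the block handler only acks the
 * status and defers to the thread. snd_pcm_period_elapsed() may take a
 * sleeping lock when the PCM link is nonatomic, so calling it from the
 * threaded handler rather than from hard irq context keeps this safe.
 */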
int axg_fifo_pcm_open(struct snd_soc_component *component,
		      struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct device *dev = axg_fifo_dev(ss);
	int ret;

	snd_soc_set_runtime_hwparams(ss, &axg_fifo_hw);

	/*
	 * Make sure the buffer and period sizes are multiples of the fifo
	 * burst
	 */
	ret = snd_pcm_hw_constraint_step(ss->runtime, 0,
					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
					 AXG_FIFO_BURST);
	if (ret)
		return ret;

	ret = snd_pcm_hw_constraint_step(ss->runtime, 0,
					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
					 AXG_FIFO_BURST);
	if (ret)
		return ret;

	ret = request_threaded_irq(fifo->irq, axg_fifo_pcm_irq_block,
				   axg_fifo_pcm_irq_block_thread,
				   IRQF_ONESHOT, dev_name(dev), ss);
	if (ret)
		return ret;

	/* Enable pclk to access the registers and clock the fifo ip */
	ret = clk_prepare_enable(fifo->pclk);
	if (ret)
		goto free_irq;

	/* Set up status2 so it reports the memory pointer */
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_STATUS2_SEL,
			   FIELD_PREP(CTRL1_STATUS2_SEL, STATUS2_SEL_DDR_READ));

	/* Make sure the dma is initially disabled */
	__dma_enable(fifo, false);

	/* Disable irqs until params are ready */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN, 0);

	/* Clear any pending interrupt */
	axg_fifo_ack_irq(fifo, FIFO_INT_MASK);

	/* Take the memory arbiter out of reset */
	ret = reset_control_deassert(fifo->arb);
	if (ret)
		goto free_clk;

	return 0;

free_clk:
	clk_disable_unprepare(fifo->pclk);
free_irq:
	free_irq(fifo->irq, ss);
	return ret;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_open);

int axg_fifo_pcm_close(struct snd_soc_component *component,
		       struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	int ret;

	/* Put the memory arbiter back in reset */
	ret = reset_control_assert(fifo->arb);

	/* Disable the fifo ip and register access */
	clk_disable_unprepare(fifo->pclk);

	/* Remove the irq */
	free_irq(fifo->irq, ss);

	return ret;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_close);

int axg_fifo_pcm_new(struct snd_soc_pcm_runtime *rtd, unsigned int type)
{
	struct snd_card *card = rtd->card->snd_card;
	size_t size = axg_fifo_hw.buffer_bytes_max;

	snd_pcm_set_managed_buffer(rtd->pcm->streams[type].substream,
				   SNDRV_DMA_TYPE_DEV, card->dev,
				   size, size);
	return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_new);

static const struct regmap_config axg_fifo_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = FIFO_CTRL2,
};
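/*
 * axg_fifo_probe() below is driven entirely by the caller's match data.
 * A minimal sketch of what a user supplies looks roughly like this
 * (names and the threshold field position are hypothetical; the real
 * tables live in the frddr and toddr drivers):
 *
 *	static const struct axg_fifo_match_data example_match_data = {
 *		.field_threshold	= REG_FIELD(FIFO_CTRL1, 16, 23),
 *		.component_drv		= &example_component_drv,
 *		.dai_drv		= &example_dai_drv,
 *	};
 *
 *	static const struct of_device_id example_of_match[] = {
 *		{
 *			.compatible = "amlogic,axg-frddr",
 *			.data = &example_match_data,
 *		},
 *		{}
 *	};
 */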
int axg_fifo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct axg_fifo_match_data *data;
	struct axg_fifo *fifo;
	void __iomem *regs;
	int ret;

	data = of_device_get_match_data(dev);
	if (!data) {
		dev_err(dev, "failed to match device\n");
		return -ENODEV;
	}

	fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
	if (!fifo)
		return -ENOMEM;
	platform_set_drvdata(pdev, fifo);

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	fifo->map = devm_regmap_init_mmio(dev, regs, &axg_fifo_regmap_cfg);
	if (IS_ERR(fifo->map)) {
		dev_err(dev, "failed to init regmap: %ld\n",
			PTR_ERR(fifo->map));
		return PTR_ERR(fifo->map);
	}

	fifo->pclk = devm_clk_get(dev, NULL);
	if (IS_ERR(fifo->pclk))
		return dev_err_probe(dev, PTR_ERR(fifo->pclk), "failed to get pclk\n");

	fifo->arb = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(fifo->arb))
		return dev_err_probe(dev, PTR_ERR(fifo->arb), "failed to get arb reset\n");

	fifo->irq = of_irq_get(dev->of_node, 0);
	if (fifo->irq <= 0) {
		dev_err(dev, "failed to get irq: %d\n", fifo->irq);
		/* of_irq_get() may return 0 on failure, never report success */
		return fifo->irq ? fifo->irq : -ENXIO;
	}

	fifo->field_threshold =
		devm_regmap_field_alloc(dev, fifo->map, data->field_threshold);
	if (IS_ERR(fifo->field_threshold))
		return PTR_ERR(fifo->field_threshold);

	ret = of_property_read_u32(dev->of_node, "amlogic,fifo-depth",
				   &fifo->depth);
	if (ret) {
		/* Error out for anything but a missing property */
		if (ret != -EINVAL)
			return ret;
		/*
		 * If the property is missing, it might be because of an old
		 * DT. In that case, assume the smallest known fifo depth.
		 */
		fifo->depth = 256;
		dev_warn(dev, "fifo depth not found, assuming %u bytes\n",
			 fifo->depth);
	}

	return devm_snd_soc_register_component(dev, data->component_drv,
					       data->dai_drv, 1);
}
EXPORT_SYMBOL_GPL(axg_fifo_probe);

MODULE_DESCRIPTION("Amlogic AXG/G12A fifo driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL v2");
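/*
 * For completeness, the component side of a user of this file wires the
 * exported helpers into its component driver ops, roughly as sketched
 * below (struct names are hypothetical; the frddr and toddr drivers are
 * the real examples):
 *
 *	static const struct snd_soc_component_driver example_component_drv = {
 *		.open		= axg_fifo_pcm_open,
 *		.close		= axg_fifo_pcm_close,
 *		.hw_params	= axg_fifo_pcm_hw_params,
 *		.hw_free	= axg_fifo_pcm_hw_free,
 *		.pointer	= axg_fifo_pcm_pointer,
 *		.trigger	= axg_fifo_pcm_trigger,
 *		.pcm_construct	= example_pcm_new,	// wraps axg_fifo_pcm_new()
 *	};
 */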