/*
 * Copyright (C) 2015 VanguardiaSur - www.vanguardiasur.com.ar
 *
 * Based on the original driver by Krzysztof Hałasa:
 * Copyright (C) 2015 Industrial Research Institute for Automation
 * and Measurements PIAP
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * Notes
 * -----
 *
 * 1. Under stress testing, it has been observed that the PCIe link
 * goes down for no apparent reason. Therefore, the driver takes special
 * care to allow device hot-unplugging.
 *
 * 2. TW686X devices are capable of setting a few different DMA modes,
 * including: scatter-gather, field and frame modes. However,
 * under stress testing it has been found that the machine can
 * freeze completely if DMA registers are programmed while streaming
 * is active.
 *
 * Therefore, the driver implements a dma_mode called 'memcpy' which
 * avoids cycling the DMA buffers, and instead allocates extra DMA buffers
 * and then copies into vmalloc'ed user buffers.
 *
 * In addition to this, when streaming is on, the driver tries to access
 * hardware registers as infrequently as possible. This is done by using
 * a timer to limit the rate at which DMA is reset on DMA channel errors.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/timer.h>

#include "tw686x.h"
#include "tw686x-regs.h"

/*
 * This module parameter sets the DMA_TIMER_INTERVAL value.
 * The DMA_TIMER_INTERVAL register controls the minimum DMA interrupt
 * time span (iow, the maximum DMA interrupt rate) thus allowing for
 * IRQ coalescing.
 *
 * The chip datasheet does not mention a time unit for this value, so
 * users wanting fine-grained control over the interrupt rate should
 * determine the desired value through testing.
 */
static u32 dma_interval = 0x00098968;
module_param(dma_interval, int, 0444);
MODULE_PARM_DESC(dma_interval, "Minimum time span for DMA interrupting host");

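/*
 * The dma_mode parameter is exposed through custom get/set callbacks
 * so it can be read and written as a string ("memcpy", "contig" or
 * "sg") rather than as a raw integer.
 */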
static unsigned int dma_mode = TW686X_DMA_MODE_MEMCPY;
static const char *dma_mode_name(unsigned int mode)
{
	switch (mode) {
	case TW686X_DMA_MODE_MEMCPY:
		return "memcpy";
	case TW686X_DMA_MODE_CONTIG:
		return "contig";
	case TW686X_DMA_MODE_SG:
		return "sg";
	default:
		return "unknown";
	}
}

static int tw686x_dma_mode_get(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%s", dma_mode_name(dma_mode));
}

static int tw686x_dma_mode_set(const char *val, const struct kernel_param *kp)
{
	if (!strcasecmp(val, dma_mode_name(TW686X_DMA_MODE_MEMCPY)))
		dma_mode = TW686X_DMA_MODE_MEMCPY;
	else if (!strcasecmp(val, dma_mode_name(TW686X_DMA_MODE_CONTIG)))
		dma_mode = TW686X_DMA_MODE_CONTIG;
	else if (!strcasecmp(val, dma_mode_name(TW686X_DMA_MODE_SG)))
		dma_mode = TW686X_DMA_MODE_SG;
	else
		return -EINVAL;
	return 0;
}
module_param_call(dma_mode, tw686x_dma_mode_set, tw686x_dma_mode_get,
		  &dma_mode, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(dma_mode, "DMA operation mode (memcpy/contig/sg, default=memcpy)");

void tw686x_disable_channel(struct tw686x_dev *dev, unsigned int channel)
{
	u32 dma_en = reg_read(dev, DMA_CHANNEL_ENABLE);
	u32 dma_cmd = reg_read(dev, DMA_CMD);

	dma_en &= ~BIT(channel);
	dma_cmd &= ~BIT(channel);

	/* Must remove it from pending too */
	dev->pending_dma_en &= ~BIT(channel);
	dev->pending_dma_cmd &= ~BIT(channel);

	/* Stop DMA if no channels are enabled */
	if (!dma_en)
		dma_cmd = 0;
	reg_write(dev, DMA_CHANNEL_ENABLE, dma_en);
	reg_write(dev, DMA_CMD, dma_cmd);
}

void tw686x_enable_channel(struct tw686x_dev *dev, unsigned int channel)
{
	u32 dma_en = reg_read(dev, DMA_CHANNEL_ENABLE);
	u32 dma_cmd = reg_read(dev, DMA_CMD);

	dev->pending_dma_en |= dma_en | BIT(channel);
	dev->pending_dma_cmd |= dma_cmd | DMA_CMD_ENABLE | BIT(channel);
}

/*
 * The purpose of this awful hack is to avoid enabling the DMA
 * channels "too fast" which makes some TW686x devices very
 * angry and freeze the CPU (see note 2).
 */
static void tw686x_dma_delay(struct timer_list *t)
{
	struct tw686x_dev *dev = from_timer(dev, t, dma_delay_timer);
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	reg_write(dev, DMA_CHANNEL_ENABLE, dev->pending_dma_en);
	reg_write(dev, DMA_CMD, dev->pending_dma_cmd);
	dev->pending_dma_en = 0;
	dev->pending_dma_cmd = 0;

	spin_unlock_irqrestore(&dev->lock, flags);
}

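/*
 * Disable the channels in ch_mask after a DMA error or timeout.
 * The current enable/cmd state is merged into the pending masks,
 * so the dma_delay timer can restore it once things have settled.
 */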
static void tw686x_reset_channels(struct tw686x_dev *dev, unsigned int ch_mask)
{
	u32 dma_en, dma_cmd;

	dma_en = reg_read(dev, DMA_CHANNEL_ENABLE);
	dma_cmd = reg_read(dev, DMA_CMD);

	/*
	 * Save the pending register state; the timer will
	 * restore it.
	 */
	dev->pending_dma_en |= dma_en;
	dev->pending_dma_cmd |= dma_cmd;

	/* Disable the channels being reset */
	reg_write(dev, DMA_CHANNEL_ENABLE, dma_en & ~ch_mask);

	if ((dma_en & ~ch_mask) == 0) {
		dev_dbg(&dev->pci_dev->dev, "reset: stopping DMA\n");
		dma_cmd &= ~DMA_CMD_ENABLE;
	}
	reg_write(dev, DMA_CMD, dma_cmd & ~ch_mask);
}

static irqreturn_t tw686x_irq(int irq, void *dev_id)
{
	struct tw686x_dev *dev = (struct tw686x_dev *)dev_id;
	unsigned int video_requests, audio_requests, reset_ch;
	u32 fifo_status, fifo_signal, fifo_ov, fifo_bad, fifo_errors;
	u32 int_status, dma_en, video_en, pb_status;
	unsigned long flags;

	int_status = reg_read(dev, INT_STATUS); /* cleared on read */
	fifo_status = reg_read(dev, VIDEO_FIFO_STATUS);

	/* INT_STATUS does not include FIFO_STATUS errors! */
	if (!int_status && !TW686X_FIFO_ERROR(fifo_status))
		return IRQ_NONE;

	if (int_status & INT_STATUS_DMA_TOUT) {
		dev_dbg(&dev->pci_dev->dev,
			"DMA timeout. Resetting DMA for all channels\n");
		reset_ch = ~0;
		goto reset_channels;
	}

	spin_lock_irqsave(&dev->lock, flags);
	dma_en = reg_read(dev, DMA_CHANNEL_ENABLE);
	spin_unlock_irqrestore(&dev->lock, flags);

	video_en = dma_en & 0xff;
	fifo_signal = ~(fifo_status & 0xff) & video_en;
	fifo_ov = fifo_status >> 24;
	fifo_bad = fifo_status >> 16;

	/* Mask of channels with signal and FIFO errors */
	fifo_errors = fifo_signal & (fifo_ov | fifo_bad);

	reset_ch = 0;
	pb_status = reg_read(dev, PB_STATUS);

	/* Coalesce video frame/error events */
	video_requests = (int_status & video_en) | fifo_errors;
	audio_requests = (int_status & dma_en) >> 8;

	if (video_requests)
		tw686x_video_irq(dev, video_requests, pb_status,
				 fifo_status, &reset_ch);
	if (audio_requests)
		tw686x_audio_irq(dev, audio_requests, pb_status);

reset_channels:
	if (reset_ch) {
		spin_lock_irqsave(&dev->lock, flags);
		tw686x_reset_channels(dev, reset_ch);
		spin_unlock_irqrestore(&dev->lock, flags);
		mod_timer(&dev->dma_delay_timer,
			  jiffies + msecs_to_jiffies(100));
	}

	return IRQ_HANDLED;
}

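/*
 * v4l2_device release callback: this runs once the last reference to
 * the v4l2_device is dropped, i.e. after tw686x_remove() and after
 * all open file handles have been closed.
 */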
static void tw686x_dev_release(struct v4l2_device *v4l2_dev)
{
	struct tw686x_dev *dev = container_of(v4l2_dev, struct tw686x_dev,
					      v4l2_dev);
	unsigned int ch;

	for (ch = 0; ch < max_channels(dev); ch++)
		v4l2_ctrl_handler_free(&dev->video_channels[ch].ctrl_handler);

	v4l2_device_unregister(&dev->v4l2_dev);

	kfree(dev->audio_channels);
	kfree(dev->video_channels);
	kfree(dev);
}

static int tw686x_probe(struct pci_dev *pci_dev,
			const struct pci_device_id *pci_id)
{
	struct tw686x_dev *dev;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->type = pci_id->driver_data;
	dev->dma_mode = dma_mode;
	sprintf(dev->name, "tw%04X", pci_dev->device);

	dev->video_channels = kcalloc(max_channels(dev),
				      sizeof(*dev->video_channels), GFP_KERNEL);
	if (!dev->video_channels) {
		err = -ENOMEM;
		goto free_dev;
	}

	dev->audio_channels = kcalloc(max_channels(dev),
				      sizeof(*dev->audio_channels), GFP_KERNEL);
	if (!dev->audio_channels) {
		err = -ENOMEM;
		goto free_video;
	}

	pr_info("%s: PCI %s, IRQ %d, MMIO 0x%lx (%s mode)\n", dev->name,
		pci_name(pci_dev), pci_dev->irq,
		(unsigned long)pci_resource_start(pci_dev, 0),
		dma_mode_name(dma_mode));

	dev->pci_dev = pci_dev;
	if (pci_enable_device(pci_dev)) {
		err = -EIO;
		goto free_audio;
	}

	pci_set_master(pci_dev);
	err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pci_dev->dev, "32-bit PCI DMA not supported\n");
		err = -EIO;
		goto disable_pci;
	}

	err = pci_request_regions(pci_dev, dev->name);
	if (err) {
		dev_err(&pci_dev->dev, "unable to request PCI region\n");
		goto disable_pci;
	}

	dev->mmio = pci_ioremap_bar(pci_dev, 0);
	if (!dev->mmio) {
		dev_err(&pci_dev->dev, "unable to remap PCI region\n");
		err = -ENOMEM;
		goto free_region;
	}

	/* Reset all subsystems */
	reg_write(dev, SYS_SOFT_RST, 0x0f);
	mdelay(1);

	reg_write(dev, SRST[0], 0x3f);
	if (max_channels(dev) > 4)
		reg_write(dev, SRST[1], 0x3f);

	/* Disable the DMA engine */
	reg_write(dev, DMA_CMD, 0);
	reg_write(dev, DMA_CHANNEL_ENABLE, 0);

	/* Enable DMA FIFO overflow and pointer check */
	reg_write(dev, DMA_CONFIG, 0xffffff04);
	reg_write(dev, DMA_CHANNEL_TIMEOUT, 0x140c8584);
	reg_write(dev, DMA_TIMER_INTERVAL, dma_interval);

	spin_lock_init(&dev->lock);

	err = request_irq(pci_dev->irq, tw686x_irq, IRQF_SHARED,
			  dev->name, dev);
	if (err < 0) {
		dev_err(&pci_dev->dev, "unable to request interrupt\n");
		goto iounmap;
	}

	timer_setup(&dev->dma_delay_timer, tw686x_dma_delay, 0);

	/*
	 * This must be set right before initializing v4l2_dev.
	 * It's used to release resources after the last handle
	 * held is released.
	 */
	dev->v4l2_dev.release = tw686x_dev_release;
	err = tw686x_video_init(dev);
	if (err) {
		dev_err(&pci_dev->dev, "can't register video\n");
		goto free_irq;
	}

	err = tw686x_audio_init(dev);
	if (err)
		dev_warn(&pci_dev->dev, "can't register audio\n");

	pci_set_drvdata(pci_dev, dev);
	return 0;

free_irq:
	free_irq(pci_dev->irq, dev);
iounmap:
	pci_iounmap(pci_dev, dev->mmio);
free_region:
	pci_release_regions(pci_dev);
disable_pci:
	pci_disable_device(pci_dev);
free_audio:
	kfree(dev->audio_channels);
free_video:
	kfree(dev->video_channels);
free_dev:
	kfree(dev);
	return err;
}

static void tw686x_remove(struct pci_dev *pci_dev)
{
	struct tw686x_dev *dev = pci_get_drvdata(pci_dev);
	unsigned long flags;

	/*
	 * This guarantees the IRQ handler is no longer running,
	 * which means we can kiss some resources good-bye.
	 */
	free_irq(pci_dev->irq, dev);

	tw686x_video_free(dev);
	tw686x_audio_free(dev);
	del_timer_sync(&dev->dma_delay_timer);

	pci_iounmap(pci_dev, dev->mmio);
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);

	/*
	 * Setting pci_dev to NULL allows the vb2_ops to detect that the
	 * hardware is no longer available. This is required because the
	 * device sometimes hot-unplugs itself as the result of a PCIe
	 * link down.
	 * The lock is really important here.
	 */
	spin_lock_irqsave(&dev->lock, flags);
	dev->pci_dev = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	/*
	 * This calls tw686x_dev_release if it's the last reference.
	 * Otherwise, release is postponed until there are no users left.
	 */
	v4l2_device_put(&dev->v4l2_dev);
}

/*
 * On TW6864 and TW6868, all channels share the pair of video DMA SG tables,
 * with 10-bit start_idx and end_idx determining the start and end of the
 * frame buffer for a particular channel.
 * TW6868 with all its 8 channels would be problematic (only 127 SG entries
 * per channel), but we support only 4 channels on this chip anyway (the
 * first 4 channels are driven by the internal video decoder, the other 4
 * would require an external TW286x part).
 *
 * On TW6865 and TW6869, each channel has its own DMA SG table, with indexes
 * starting with 0. Both chips have complete sets of internal video decoders
 * (respectively 4- or 8-channel).
 *
 * All chips have separate SG tables for two video frames.
 */

/* driver_data is the number of A/V channels */
static const struct pci_device_id tw686x_pci_tbl[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, 0x6864),
		.driver_data = 4
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, 0x6865), /* not tested */
		.driver_data = 4 | TYPE_SECOND_GEN
	},
	/*
	 * TW6868 supports 8 A/V channels with an external TW2865 chip;
	 * this is not supported by the driver.
	 */
	{
		PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, 0x6868), /* not tested */
		.driver_data = 4
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, 0x6869),
		.driver_data = 8 | TYPE_SECOND_GEN
	},
	{}
};
MODULE_DEVICE_TABLE(pci, tw686x_pci_tbl);

static struct pci_driver tw686x_pci_driver = {
	.name = "tw686x",
	.id_table = tw686x_pci_tbl,
	.probe = tw686x_probe,
	.remove = tw686x_remove,
};
module_pci_driver(tw686x_pci_driver);

MODULE_DESCRIPTION("Driver for video frame grabber cards based on Intersil/Techwell TW686[4589]");
MODULE_AUTHOR("Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>");
MODULE_AUTHOR("Krzysztof Hałasa <khalasa@piap.pl>");
MODULE_LICENSE("GPL v2");