1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Xilinx AXIS FIFO: interface to the Xilinx AXI-Stream FIFO IP core 4 * 5 * Copyright (C) 2018 Jacob Feder 6 * 7 * Authors: Jacob Feder <jacobsfeder@gmail.com> 8 * 9 * See Xilinx PG080 document for IP details 10 */ 11 12 /* ---------------------------- 13 * includes 14 * ---------------------------- 15 */ 16 17 #include <linux/kernel.h> 18 #include <linux/wait.h> 19 #include <linux/mutex.h> 20 #include <linux/device.h> 21 #include <linux/cdev.h> 22 #include <linux/init.h> 23 #include <linux/module.h> 24 #include <linux/slab.h> 25 #include <linux/io.h> 26 #include <linux/moduleparam.h> 27 #include <linux/interrupt.h> 28 #include <linux/param.h> 29 #include <linux/fs.h> 30 #include <linux/types.h> 31 #include <linux/uaccess.h> 32 #include <linux/jiffies.h> 33 #include <linux/miscdevice.h> 34 35 #include <linux/of_address.h> 36 #include <linux/of_device.h> 37 #include <linux/of_platform.h> 38 39 /* ---------------------------- 40 * driver parameters 41 * ---------------------------- 42 */ 43 44 #define DRIVER_NAME "axis_fifo" 45 46 #define READ_BUF_SIZE 128U /* read buffer length in words */ 47 #define WRITE_BUF_SIZE 128U /* write buffer length in words */ 48 49 /* ---------------------------- 50 * IP register offsets 51 * ---------------------------- 52 */ 53 54 #define XLLF_ISR_OFFSET 0x00000000 /* Interrupt Status */ 55 #define XLLF_IER_OFFSET 0x00000004 /* Interrupt Enable */ 56 57 #define XLLF_TDFR_OFFSET 0x00000008 /* Transmit Reset */ 58 #define XLLF_TDFV_OFFSET 0x0000000c /* Transmit Vacancy */ 59 #define XLLF_TDFD_OFFSET 0x00000010 /* Transmit Data */ 60 #define XLLF_TLR_OFFSET 0x00000014 /* Transmit Length */ 61 62 #define XLLF_RDFR_OFFSET 0x00000018 /* Receive Reset */ 63 #define XLLF_RDFO_OFFSET 0x0000001c /* Receive Occupancy */ 64 #define XLLF_RDFD_OFFSET 0x00000020 /* Receive Data */ 65 #define XLLF_RLR_OFFSET 0x00000024 /* Receive Length */ 66 #define XLLF_SRR_OFFSET 0x00000028 /* Local Link Reset */ 67 
#define XLLF_TDR_OFFSET 0x0000002C /* Transmit Destination */ 68 #define XLLF_RDR_OFFSET 0x00000030 /* Receive Destination */ 69 70 /* ---------------------------- 71 * reset register masks 72 * ---------------------------- 73 */ 74 75 #define XLLF_RDFR_RESET_MASK 0x000000a5 /* receive reset value */ 76 #define XLLF_TDFR_RESET_MASK 0x000000a5 /* Transmit reset value */ 77 #define XLLF_SRR_RESET_MASK 0x000000a5 /* Local Link reset value */ 78 79 /* ---------------------------- 80 * interrupt masks 81 * ---------------------------- 82 */ 83 84 #define XLLF_INT_RPURE_MASK 0x80000000 /* Receive under-read */ 85 #define XLLF_INT_RPORE_MASK 0x40000000 /* Receive over-read */ 86 #define XLLF_INT_RPUE_MASK 0x20000000 /* Receive underrun (empty) */ 87 #define XLLF_INT_TPOE_MASK 0x10000000 /* Transmit overrun */ 88 #define XLLF_INT_TC_MASK 0x08000000 /* Transmit complete */ 89 #define XLLF_INT_RC_MASK 0x04000000 /* Receive complete */ 90 #define XLLF_INT_TSE_MASK 0x02000000 /* Transmit length mismatch */ 91 #define XLLF_INT_TRC_MASK 0x01000000 /* Transmit reset complete */ 92 #define XLLF_INT_RRC_MASK 0x00800000 /* Receive reset complete */ 93 #define XLLF_INT_TFPF_MASK 0x00400000 /* Tx FIFO Programmable Full */ 94 #define XLLF_INT_TFPE_MASK 0x00200000 /* Tx FIFO Programmable Empty */ 95 #define XLLF_INT_RFPF_MASK 0x00100000 /* Rx FIFO Programmable Full */ 96 #define XLLF_INT_RFPE_MASK 0x00080000 /* Rx FIFO Programmable Empty */ 97 #define XLLF_INT_ALL_MASK 0xfff80000 /* All the ints */ 98 #define XLLF_INT_ERROR_MASK 0xf2000000 /* Error status ints */ 99 #define XLLF_INT_RXERROR_MASK 0xe0000000 /* Receive Error status ints */ 100 #define XLLF_INT_TXERROR_MASK 0x12000000 /* Transmit Error status ints */ 101 102 /* ---------------------------- 103 * globals 104 * ---------------------------- 105 */ 106 static long read_timeout = 1000; /* ms to wait before read() times out */ 107 static long write_timeout = 1000; /* ms to wait before write() times out */ 108 109 /* 
---------------------------- 110 * module command-line arguments 111 * ---------------------------- 112 */ 113 114 module_param(read_timeout, long, 0444); 115 MODULE_PARM_DESC(read_timeout, "ms to wait before blocking read() timing out; set to -1 for no timeout"); 116 module_param(write_timeout, long, 0444); 117 MODULE_PARM_DESC(write_timeout, "ms to wait before blocking write() timing out; set to -1 for no timeout"); 118 119 /* ---------------------------- 120 * types 121 * ---------------------------- 122 */ 123 124 struct axis_fifo { 125 int irq; /* interrupt */ 126 void __iomem *base_addr; /* kernel space memory */ 127 128 unsigned int rx_fifo_depth; /* max words in the receive fifo */ 129 unsigned int tx_fifo_depth; /* max words in the transmit fifo */ 130 int has_rx_fifo; /* whether the IP has the rx fifo enabled */ 131 int has_tx_fifo; /* whether the IP has the tx fifo enabled */ 132 133 wait_queue_head_t read_queue; /* wait queue for asynchronos read */ 134 struct mutex read_lock; /* lock for reading */ 135 wait_queue_head_t write_queue; /* wait queue for asynchronos write */ 136 struct mutex write_lock; /* lock for writing */ 137 unsigned int write_flags; /* write file flags */ 138 unsigned int read_flags; /* read file flags */ 139 140 struct device *dt_device; /* device created from the device tree */ 141 struct miscdevice miscdev; 142 }; 143 144 /* ---------------------------- 145 * sysfs entries 146 * ---------------------------- 147 */ 148 149 static ssize_t sysfs_write(struct device *dev, const char *buf, 150 size_t count, unsigned int addr_offset) 151 { 152 struct axis_fifo *fifo = dev_get_drvdata(dev); 153 unsigned long tmp; 154 int rc; 155 156 rc = kstrtoul(buf, 0, &tmp); 157 if (rc < 0) 158 return rc; 159 160 iowrite32(tmp, fifo->base_addr + addr_offset); 161 162 return count; 163 } 164 165 static ssize_t sysfs_read(struct device *dev, char *buf, 166 unsigned int addr_offset) 167 { 168 struct axis_fifo *fifo = dev_get_drvdata(dev); 169 unsigned 
int read_val; 170 unsigned int len; 171 char tmp[32]; 172 173 read_val = ioread32(fifo->base_addr + addr_offset); 174 len = snprintf(tmp, sizeof(tmp), "0x%x\n", read_val); 175 memcpy(buf, tmp, len); 176 177 return len; 178 } 179 180 static ssize_t isr_store(struct device *dev, struct device_attribute *attr, 181 const char *buf, size_t count) 182 { 183 return sysfs_write(dev, buf, count, XLLF_ISR_OFFSET); 184 } 185 186 static ssize_t isr_show(struct device *dev, 187 struct device_attribute *attr, char *buf) 188 { 189 return sysfs_read(dev, buf, XLLF_ISR_OFFSET); 190 } 191 192 static DEVICE_ATTR_RW(isr); 193 194 static ssize_t ier_store(struct device *dev, struct device_attribute *attr, 195 const char *buf, size_t count) 196 { 197 return sysfs_write(dev, buf, count, XLLF_IER_OFFSET); 198 } 199 200 static ssize_t ier_show(struct device *dev, 201 struct device_attribute *attr, char *buf) 202 { 203 return sysfs_read(dev, buf, XLLF_IER_OFFSET); 204 } 205 206 static DEVICE_ATTR_RW(ier); 207 208 static ssize_t tdfr_store(struct device *dev, struct device_attribute *attr, 209 const char *buf, size_t count) 210 { 211 return sysfs_write(dev, buf, count, XLLF_TDFR_OFFSET); 212 } 213 214 static DEVICE_ATTR_WO(tdfr); 215 216 static ssize_t tdfv_show(struct device *dev, 217 struct device_attribute *attr, char *buf) 218 { 219 return sysfs_read(dev, buf, XLLF_TDFV_OFFSET); 220 } 221 222 static DEVICE_ATTR_RO(tdfv); 223 224 static ssize_t tdfd_store(struct device *dev, struct device_attribute *attr, 225 const char *buf, size_t count) 226 { 227 return sysfs_write(dev, buf, count, XLLF_TDFD_OFFSET); 228 } 229 230 static DEVICE_ATTR_WO(tdfd); 231 232 static ssize_t tlr_store(struct device *dev, struct device_attribute *attr, 233 const char *buf, size_t count) 234 { 235 return sysfs_write(dev, buf, count, XLLF_TLR_OFFSET); 236 } 237 238 static DEVICE_ATTR_WO(tlr); 239 240 static ssize_t rdfr_store(struct device *dev, struct device_attribute *attr, 241 const char *buf, size_t count) 242 
{ 243 return sysfs_write(dev, buf, count, XLLF_RDFR_OFFSET); 244 } 245 246 static DEVICE_ATTR_WO(rdfr); 247 248 static ssize_t rdfo_show(struct device *dev, 249 struct device_attribute *attr, char *buf) 250 { 251 return sysfs_read(dev, buf, XLLF_RDFO_OFFSET); 252 } 253 254 static DEVICE_ATTR_RO(rdfo); 255 256 static ssize_t rdfd_show(struct device *dev, 257 struct device_attribute *attr, char *buf) 258 { 259 return sysfs_read(dev, buf, XLLF_RDFD_OFFSET); 260 } 261 262 static DEVICE_ATTR_RO(rdfd); 263 264 static ssize_t rlr_show(struct device *dev, 265 struct device_attribute *attr, char *buf) 266 { 267 return sysfs_read(dev, buf, XLLF_RLR_OFFSET); 268 } 269 270 static DEVICE_ATTR_RO(rlr); 271 272 static ssize_t srr_store(struct device *dev, struct device_attribute *attr, 273 const char *buf, size_t count) 274 { 275 return sysfs_write(dev, buf, count, XLLF_SRR_OFFSET); 276 } 277 278 static DEVICE_ATTR_WO(srr); 279 280 static ssize_t tdr_store(struct device *dev, struct device_attribute *attr, 281 const char *buf, size_t count) 282 { 283 return sysfs_write(dev, buf, count, XLLF_TDR_OFFSET); 284 } 285 286 static DEVICE_ATTR_WO(tdr); 287 288 static ssize_t rdr_show(struct device *dev, 289 struct device_attribute *attr, char *buf) 290 { 291 return sysfs_read(dev, buf, XLLF_RDR_OFFSET); 292 } 293 294 static DEVICE_ATTR_RO(rdr); 295 296 static struct attribute *axis_fifo_attrs[] = { 297 &dev_attr_isr.attr, 298 &dev_attr_ier.attr, 299 &dev_attr_tdfr.attr, 300 &dev_attr_tdfv.attr, 301 &dev_attr_tdfd.attr, 302 &dev_attr_tlr.attr, 303 &dev_attr_rdfr.attr, 304 &dev_attr_rdfo.attr, 305 &dev_attr_rdfd.attr, 306 &dev_attr_rlr.attr, 307 &dev_attr_srr.attr, 308 &dev_attr_tdr.attr, 309 &dev_attr_rdr.attr, 310 NULL, 311 }; 312 313 static const struct attribute_group axis_fifo_attrs_group = { 314 .name = "ip_registers", 315 .attrs = axis_fifo_attrs, 316 }; 317 318 static const struct attribute_group *axis_fifo_attrs_groups[] = { 319 &axis_fifo_attrs_group, 320 NULL, 321 }; 322 323 /* 
 ----------------------------
 * implementation
 * ----------------------------
 */

/* Reset the AXI4-Stream links and both FIFO data paths, enable the
 * completion and error interrupts this driver handles, and clear any
 * interrupts already pending (per PG080, writing a bit to ISR clears it).
 */
static void reset_ip_core(struct axis_fifo *fifo)
{
	iowrite32(XLLF_SRR_RESET_MASK, fifo->base_addr + XLLF_SRR_OFFSET);
	iowrite32(XLLF_TDFR_RESET_MASK, fifo->base_addr + XLLF_TDFR_OFFSET);
	iowrite32(XLLF_RDFR_RESET_MASK, fifo->base_addr + XLLF_RDFR_OFFSET);
	iowrite32(XLLF_INT_TC_MASK | XLLF_INT_RC_MASK | XLLF_INT_RPURE_MASK |
		  XLLF_INT_RPORE_MASK | XLLF_INT_RPUE_MASK |
		  XLLF_INT_TPOE_MASK | XLLF_INT_TSE_MASK,
		  fifo->base_addr + XLLF_IER_OFFSET);
	iowrite32(XLLF_INT_ALL_MASK, fifo->base_addr + XLLF_ISR_OFFSET);
}

/**
 * axis_fifo_read() - Read a packet from AXIS-FIFO character device.
 * @f: Open file.
 * @buf: User space buffer to read to.
 * @len: User space buffer length.
 * @off: Buffer offset.
 *
 * As defined by the device's documentation, we need to check the device's
 * occupancy before reading the length register and then the data. All these
 * operations must be executed atomically, in order and one after the other
 * without missing any.
 *
 * Returns the number of bytes read from the device or negative error code
 * on failure.
 */
static ssize_t axis_fifo_read(struct file *f, char __user *buf,
			      size_t len, loff_t *off)
{
	struct axis_fifo *fifo = (struct axis_fifo *)f->private_data;
	size_t bytes_available;
	unsigned int words_available;
	unsigned int copied;
	unsigned int copy;
	unsigned int i;
	int ret;
	u32 tmp_buf[READ_BUF_SIZE];

	if (fifo->read_flags & O_NONBLOCK) {
		/*
		 * Device opened in non-blocking mode. Try to lock it and then
		 * check if any packet is available.
		 */
		if (!mutex_trylock(&fifo->read_lock))
			return -EAGAIN;

		/* RDFO = receive FIFO occupancy; zero means no packet yet */
		if (!ioread32(fifo->base_addr + XLLF_RDFO_OFFSET)) {
			ret = -EAGAIN;
			goto end_unlock;
		}
	} else {
		/* opened in blocking mode
		 * wait for a packet available interrupt (or timeout)
		 * if nothing is currently available
		 */
		mutex_lock(&fifo->read_lock);
		ret = wait_event_interruptible_timeout(fifo->read_queue,
			ioread32(fifo->base_addr + XLLF_RDFO_OFFSET),
			read_timeout);

		if (ret <= 0) {
			/* 0 = timed out, <0 = interrupted or wait error */
			if (ret == 0) {
				ret = -EAGAIN;
			} else if (ret != -ERESTARTSYS) {
				dev_err(fifo->dt_device, "wait_event_interruptible_timeout() error in read (ret=%i)\n",
					ret);
			}

			goto end_unlock;
		}
	}

	/* RLR holds the byte length of the packet at the head of the fifo;
	 * it must be read exactly once per packet, before draining RDFD
	 */
	bytes_available = ioread32(fifo->base_addr + XLLF_RLR_OFFSET);
	if (!bytes_available) {
		dev_err(fifo->dt_device, "received a packet of length 0 - fifo core will be reset\n");
		reset_ip_core(fifo);
		ret = -EIO;
		goto end_unlock;
	}

	/* partial packet reads aren't supported - the whole packet must fit
	 * in the user buffer, otherwise the core is reset to resynchronize
	 */
	if (bytes_available > len) {
		dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu) - fifo core will be reset\n",
			bytes_available, len);
		reset_ip_core(fifo);
		ret = -EINVAL;
		goto end_unlock;
	}

	if (bytes_available % sizeof(u32)) {
		/* this probably can't happen unless IP
		 * registers were previously mishandled
		 */
		dev_err(fifo->dt_device, "received a packet that isn't word-aligned - fifo core will be reset\n");
		reset_ip_core(fifo);
		ret = -EIO;
		goto end_unlock;
	}

	words_available = bytes_available / sizeof(u32);

	/* read data into an intermediate buffer, copying the contents
	 * to userspace when the buffer is full
	 */
	copied = 0;
	while (words_available > 0) {
		copy = min(words_available, READ_BUF_SIZE);

		for (i = 0; i < copy; i++) {
			tmp_buf[i] = ioread32(fifo->base_addr +
					      XLLF_RDFD_OFFSET);
		}

		if (copy_to_user(buf + copied * sizeof(u32), tmp_buf,
				 copy * sizeof(u32))) {
			/* words already drained from RDFD are lost; reset
			 * the core so the next read starts on a packet
			 * boundary
			 */
			reset_ip_core(fifo);
			ret = -EFAULT;
			goto end_unlock;
		}

		copied += copy;
		words_available -= copy;
	}

	ret = bytes_available;

end_unlock:
	mutex_unlock(&fifo->read_lock);

	return ret;
}

/**
 * axis_fifo_write() - Write buffer to AXIS-FIFO character device.
 * @f: Open file.
 * @buf: User space buffer to write to the device.
 * @len: User space buffer length.
 * @off: Buffer offset.
 *
 * As defined by the device's documentation, we need to write to the device's
 * data buffer then to the device's packet length register atomically. Also,
 * we need to lock before checking if the device has available space to avoid
 * any concurrency issue.
 *
 * Returns the number of bytes written to the device or negative error code
 * on failure.
 */
static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
			       size_t len, loff_t *off)
{
	struct axis_fifo *fifo = (struct axis_fifo *)f->private_data;
	unsigned int words_to_write;
	unsigned int copied;
	unsigned int copy;
	unsigned int i;
	int ret;
	u32 tmp_buf[WRITE_BUF_SIZE];

	if (len % sizeof(u32)) {
		dev_err(fifo->dt_device,
			"tried to send a packet that isn't word-aligned\n");
		return -EINVAL;
	}

	words_to_write = len / sizeof(u32);

	if (!words_to_write) {
		dev_err(fifo->dt_device,
			"tried to send a packet of length 0\n");
		return -EINVAL;
	}

	/* the packet must fit in the transmit fifo in its entirety since it
	 * is only launched by the TLR write below
	 */
	if (words_to_write > fifo->tx_fifo_depth) {
		dev_err(fifo->dt_device, "tried to write more words [%u] than slots in the fifo buffer [%u]\n",
			words_to_write, fifo->tx_fifo_depth);
		return -EINVAL;
	}

	if (fifo->write_flags & O_NONBLOCK) {
		/*
		 * Device opened in non-blocking mode. Try to lock it and then
		 * check if there is any room to write the given buffer.
		 */
		if (!mutex_trylock(&fifo->write_lock))
			return -EAGAIN;

		/* TDFV = transmit FIFO vacancy, in words */
		if (words_to_write > ioread32(fifo->base_addr +
					      XLLF_TDFV_OFFSET)) {
			ret = -EAGAIN;
			goto end_unlock;
		}
	} else {
		/* opened in blocking mode */

		/* wait for an interrupt (or timeout) if there isn't
		 * currently enough room in the fifo
		 */
		mutex_lock(&fifo->write_lock);
		ret = wait_event_interruptible_timeout(fifo->write_queue,
			ioread32(fifo->base_addr + XLLF_TDFV_OFFSET)
				 >= words_to_write,
			write_timeout);

		if (ret <= 0) {
			/* 0 = timed out, <0 = interrupted or wait error */
			if (ret == 0) {
				ret = -EAGAIN;
			} else if (ret != -ERESTARTSYS) {
				dev_err(fifo->dt_device, "wait_event_interruptible_timeout() error in write (ret=%i)\n",
					ret);
			}

			goto end_unlock;
		}
	}

	/* write data from an intermediate buffer into the fifo IP, refilling
	 * the buffer with userspace data as needed
	 */
	copied = 0;
	while (words_to_write > 0) {
		copy = min(words_to_write, WRITE_BUF_SIZE);

		if (copy_from_user(tmp_buf, buf + copied * sizeof(u32),
				   copy * sizeof(u32))) {
			/* some words may already be staged in the fifo;
			 * reset the core to discard the partial packet
			 */
			reset_ip_core(fifo);
			ret = -EFAULT;
			goto end_unlock;
		}

		for (i = 0; i < copy; i++)
			iowrite32(tmp_buf[i], fifo->base_addr +
				  XLLF_TDFD_OFFSET);

		copied += copy;
		words_to_write -= copy;
	}

	ret = copied * sizeof(u32);

	/* write packet size to fifo - this launches the transmission */
	iowrite32(ret, fifo->base_addr + XLLF_TLR_OFFSET);

end_unlock:
	mutex_unlock(&fifo->write_lock);

	return ret;
}

/* Interrupt handler: services one enabled+pending interrupt per loop
 * iteration (highest-priority branch first), acknowledging each by writing
 * its bit back to ISR, until no enabled interrupts remain pending.
 */
static irqreturn_t axis_fifo_irq(int irq, void *dw)
{
	struct axis_fifo *fifo = (struct axis_fifo *)dw;
	unsigned int pending_interrupts;

	do {
		/* only consider interrupts that are both enabled and raised */
		pending_interrupts = ioread32(fifo->base_addr +
					      XLLF_IER_OFFSET) &
					      ioread32(fifo->base_addr
						       + XLLF_ISR_OFFSET);
		if (pending_interrupts & XLLF_INT_RC_MASK) {
			/* packet received */

			/* wake the reader process if it is waiting */
			wake_up(&fifo->read_queue);

			/* clear interrupt */
			iowrite32(XLLF_INT_RC_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TC_MASK) {
			/* packet sent */

			/* wake the writer process if it is waiting */
			wake_up(&fifo->write_queue);

			iowrite32(XLLF_INT_TC_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TFPF_MASK) {
			/* transmit fifo programmable full */

			iowrite32(XLLF_INT_TFPF_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TFPE_MASK) {
			/* transmit fifo programmable empty */

			iowrite32(XLLF_INT_TFPE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RFPF_MASK) {
			/* receive fifo programmable full */

			iowrite32(XLLF_INT_RFPF_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RFPE_MASK) {
			/* receive fifo programmable empty */

			iowrite32(XLLF_INT_RFPE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TRC_MASK) {
			/* transmit reset complete interrupt */

			iowrite32(XLLF_INT_TRC_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RRC_MASK) {
			/* receive reset complete interrupt */

			iowrite32(XLLF_INT_RRC_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RPURE_MASK) {
			/* receive fifo under-read error interrupt */
			dev_err(fifo->dt_device,
				"receive under-read interrupt\n");

			iowrite32(XLLF_INT_RPURE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RPORE_MASK) {
			/* receive over-read error interrupt */
			dev_err(fifo->dt_device,
				"receive over-read interrupt\n");

			iowrite32(XLLF_INT_RPORE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RPUE_MASK) {
			/* receive underrun error interrupt */
			dev_err(fifo->dt_device,
				"receive underrun error interrupt\n");

			iowrite32(XLLF_INT_RPUE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TPOE_MASK) {
			/* transmit overrun error interrupt */
			dev_err(fifo->dt_device,
				"transmit overrun error interrupt\n");

			iowrite32(XLLF_INT_TPOE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TSE_MASK) {
			/* transmit length mismatch error interrupt */
			dev_err(fifo->dt_device,
				"transmit length mismatch error interrupt\n");

			iowrite32(XLLF_INT_TSE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts) {
			/* unknown interrupt type */
			dev_err(fifo->dt_device,
				"unknown interrupt(s) 0x%x\n",
				pending_interrupts);

			iowrite32(XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		}
	} while (pending_interrupts);

	return IRQ_HANDLED;
}

/* Validate the requested access mode against the fifos the IP actually
 * has, and stash the open flags so read()/write() can honor O_NONBLOCK.
 * NOTE(review): the flags are stored per-device, not per-file, so the
 * last open wins if the device is opened multiple times - confirm this
 * is acceptable for the intended use.
 */
static int axis_fifo_open(struct inode *inod, struct file *f)
{
	/* misc_register() set private_data to the miscdevice */
	struct axis_fifo *fifo = container_of(f->private_data,
					      struct axis_fifo, miscdev);
	f->private_data = fifo;

	if (((f->f_flags & O_ACCMODE) == O_WRONLY) ||
	    ((f->f_flags & O_ACCMODE) == O_RDWR)) {
		if (fifo->has_tx_fifo) {
			fifo->write_flags = f->f_flags;
		} else {
			dev_err(fifo->dt_device, "tried to open device for write but the transmit fifo is disabled\n");
			return -EPERM;
		}
	}

	if (((f->f_flags & O_ACCMODE) == O_RDONLY) ||
	    ((f->f_flags & O_ACCMODE) == O_RDWR)) {
		if (fifo->has_rx_fifo) {
			fifo->read_flags = f->f_flags;
		} else {
			dev_err(fifo->dt_device, "tried to open device for read but the receive fifo is disabled\n");
			return -EPERM;
		}
	}

	return 0;
}

static int axis_fifo_close(struct inode *inod, struct file *f)
{
	f->private_data = NULL;

	return 0;
}

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = axis_fifo_open,
	.release = axis_fifo_close,
	.read = axis_fifo_read,
	.write = axis_fifo_write
};

/* read named property from the device tree */
static int get_dts_property(struct axis_fifo *fifo,
			    char *name, unsigned int *var)
{
	int rc;

	rc = of_property_read_u32(fifo->dt_device->of_node, name, var);
	if (rc) {
		dev_err(fifo->dt_device, "couldn't read IP dts property '%s'",
			name);
		return rc;
	}
	dev_dbg(fifo->dt_device, "dts property '%s' = %u\n",
		name, *var);

	return 0;
}

/* Read and validate the IP configuration from the device tree: both data
 * widths must be 32 bits, and the fifo depths and rx/tx enables are
 * recorded in @fifo.
 */
static int axis_fifo_parse_dt(struct axis_fifo *fifo)
{
	int ret;
	unsigned int value;

	ret = get_dts_property(fifo, "xlnx,axi-str-rxd-tdata-width", &value);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,axi-str-rxd-tdata-width property\n");
		goto end;
	} else if (value != 32) {
		dev_err(fifo->dt_device, "xlnx,axi-str-rxd-tdata-width only supports 32 bits\n");
		ret = -EIO;
		goto end;
	}

	ret = get_dts_property(fifo, "xlnx,axi-str-txd-tdata-width", &value);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,axi-str-txd-tdata-width property\n");
		goto end;
	} else if (value != 32) {
		dev_err(fifo->dt_device, "xlnx,axi-str-txd-tdata-width only supports 32 bits\n");
		ret = -EIO;
		goto end;
	}

	ret = get_dts_property(fifo, "xlnx,rx-fifo-depth",
			       &fifo->rx_fifo_depth);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,rx-fifo-depth property\n");
		ret = -EIO;
		goto end;
	}

	ret = get_dts_property(fifo, "xlnx,tx-fifo-depth",
			       &fifo->tx_fifo_depth);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,tx-fifo-depth property\n");
		ret = -EIO;
		goto end;
	}

	/* IP sets TDFV to fifo depth - 4 so we will do the same */
	fifo->tx_fifo_depth -= 4;

	ret = get_dts_property(fifo, "xlnx,use-rx-data", &fifo->has_rx_fifo);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,use-rx-data property\n");
		ret = -EIO;
		goto end;
	}

	ret = get_dts_property(fifo, "xlnx,use-tx-data", &fifo->has_tx_fifo);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,use-tx-data property\n");
		ret = -EIO;
		goto end;
	}

end:
	return ret;
}

/* Probe: map the IP registers, validate the device-tree configuration,
 * reset the core, hook up the interrupt, and register the misc character
 * device. All allocations and the IRQ are devm-managed, so the error path
 * only needs to clear drvdata.
 */
static int axis_fifo_probe(struct platform_device *pdev)
{
	struct resource *r_mem; /* IO mem resources */
	struct device *dev = &pdev->dev; /* OS device (from device tree) */
	struct axis_fifo *fifo = NULL;
	char *device_name;
	int rc = 0; /* error return value */

	/* ----------------------------
	 * init wrapper device
	 * ----------------------------
	 */

	device_name = devm_kzalloc(dev, 32, GFP_KERNEL);
	if (!device_name)
		return -ENOMEM;

	/* allocate device wrapper memory */
	fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
	if (!fifo)
		return -ENOMEM;

	dev_set_drvdata(dev, fifo);
	fifo->dt_device = dev;

	init_waitqueue_head(&fifo->read_queue);
	init_waitqueue_head(&fifo->write_queue);

	mutex_init(&fifo->read_lock);
	mutex_init(&fifo->write_lock);

	/* ----------------------------
	 * init device memory space
	 * ----------------------------
	 */

	/* get iospace for the device */
	r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r_mem) {
		dev_err(fifo->dt_device, "invalid address\n");
		rc = -ENODEV;
		goto err_initial;
	}

	/* request physical memory */
	fifo->base_addr = devm_ioremap_resource(fifo->dt_device, r_mem);
	if (IS_ERR(fifo->base_addr)) {
		rc = PTR_ERR(fifo->base_addr);
		goto err_initial;
	}

	dev_dbg(fifo->dt_device, "remapped memory to 0x%p\n", fifo->base_addr);

	/* create unique device name */
	snprintf(device_name, 32, "%s_%pa", DRIVER_NAME, &r_mem->start);
	dev_dbg(fifo->dt_device, "device name [%s]\n", device_name);

	/* ----------------------------
	 * init IP
	 * ----------------------------
	 */

	rc = axis_fifo_parse_dt(fifo);
	if (rc)
		goto err_initial;

	reset_ip_core(fifo);

	/* ----------------------------
	 * init device interrupts
	 * ----------------------------
	 */

	/* get IRQ resource */
	rc = platform_get_irq(pdev, 0);
	if (rc < 0)
		goto err_initial;

	/* request IRQ */
	fifo->irq = rc;
	rc = devm_request_irq(fifo->dt_device, fifo->irq, &axis_fifo_irq, 0,
			      DRIVER_NAME, fifo);
	if (rc) {
		dev_err(fifo->dt_device, "couldn't allocate interrupt %i\n",
			fifo->irq);
		goto err_initial;
	}

	/* ----------------------------
	 * init char device
	 * ----------------------------
	 */

	/* create character device */
	fifo->miscdev.fops = &fops;
	fifo->miscdev.minor = MISC_DYNAMIC_MINOR;
	fifo->miscdev.name = device_name;
	fifo->miscdev.groups = axis_fifo_attrs_groups;
	fifo->miscdev.parent = dev;
	rc = misc_register(&fifo->miscdev);
	if (rc < 0)
		goto err_initial;

	dev_info(fifo->dt_device, "axis-fifo created at %pa mapped to 0x%pa, irq=%i\n",
		 &r_mem->start, &fifo->base_addr, fifo->irq);

	return 0;

err_initial:
	dev_set_drvdata(dev, NULL);
	return rc;
}

static void axis_fifo_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct axis_fifo *fifo = dev_get_drvdata(dev);

	misc_deregister(&fifo->miscdev);
	dev_set_drvdata(dev, NULL);
}

static const struct of_device_id axis_fifo_of_match[] = {
	{ .compatible = "xlnx,axi-fifo-mm-s-4.1", },
	{},
};
MODULE_DEVICE_TABLE(of, axis_fifo_of_match);

static struct platform_driver axis_fifo_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = axis_fifo_of_match,
	},
	.probe = axis_fifo_probe,
	.remove_new = axis_fifo_remove,
};

/* Convert the module-parameter timeouts from milliseconds to jiffies
 * (or to "wait forever" for negative values) before registering the
 * driver; read()/write() pass these values straight to
 * wait_event_interruptible_timeout().
 */
static int __init axis_fifo_init(void)
{
	if (read_timeout >= 0)
		read_timeout = msecs_to_jiffies(read_timeout);
	else
		read_timeout = MAX_SCHEDULE_TIMEOUT;

	if (write_timeout >= 0)
		write_timeout = msecs_to_jiffies(write_timeout);
	else
		write_timeout = MAX_SCHEDULE_TIMEOUT;

	pr_info("axis-fifo driver loaded with parameters read_timeout = %li, write_timeout = %li\n",
		read_timeout, write_timeout);
	return platform_driver_register(&axis_fifo_driver);
}

module_init(axis_fifo_init);

static void __exit axis_fifo_exit(void)
{
	platform_driver_unregister(&axis_fifo_driver);
}

module_exit(axis_fifo_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jacob Feder <jacobsfeder@gmail.com>");
MODULE_DESCRIPTION("Xilinx AXI-Stream FIFO v4.1 IP core driver");