// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx AXIS FIFO: interface to the Xilinx AXI-Stream FIFO IP core
 *
 * Copyright (C) 2018 Jacob Feder
 *
 * Authors: Jacob Feder <jacobsfeder@gmail.com>
 *
 * See Xilinx PG080 document for IP details
 */

/* ----------------------------
 * includes
 * ----------------------------
 */

#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/param.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/jiffies.h>

#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

/* ----------------------------
 * driver parameters
 * ----------------------------
 */

#define DRIVER_NAME "axis_fifo"

#define READ_BUF_SIZE 128U /* read buffer length in words */
#define WRITE_BUF_SIZE 128U /* write buffer length in words */

/* ----------------------------
 * IP register offsets
 * ----------------------------
 */

#define XLLF_ISR_OFFSET 0x00000000 /* Interrupt Status */
#define XLLF_IER_OFFSET 0x00000004 /* Interrupt Enable */

#define XLLF_TDFR_OFFSET 0x00000008 /* Transmit Reset */
#define XLLF_TDFV_OFFSET 0x0000000c /* Transmit Vacancy */
#define XLLF_TDFD_OFFSET 0x00000010 /* Transmit Data */
#define XLLF_TLR_OFFSET 0x00000014 /* Transmit Length */

#define XLLF_RDFR_OFFSET 0x00000018 /* Receive Reset */
#define XLLF_RDFO_OFFSET 0x0000001c /* Receive Occupancy */
#define XLLF_RDFD_OFFSET 0x00000020 /* Receive Data */
#define XLLF_RLR_OFFSET 0x00000024 /* Receive Length */
#define XLLF_SRR_OFFSET 0x00000028 /* Local Link Reset */
#define XLLF_TDR_OFFSET 0x0000002C /* Transmit Destination */
#define XLLF_RDR_OFFSET 0x00000030 /* Receive Destination */

/* ----------------------------
 * reset register masks
 * ----------------------------
 */

#define XLLF_RDFR_RESET_MASK 0x000000a5 /* receive reset value */
#define XLLF_TDFR_RESET_MASK 0x000000a5 /* Transmit reset value */
#define XLLF_SRR_RESET_MASK 0x000000a5 /* Local Link reset value */

/* ----------------------------
 * interrupt masks
 * ----------------------------
 */

#define XLLF_INT_RPURE_MASK 0x80000000 /* Receive under-read */
#define XLLF_INT_RPORE_MASK 0x40000000 /* Receive over-read */
#define XLLF_INT_RPUE_MASK 0x20000000 /* Receive underrun (empty) */
#define XLLF_INT_TPOE_MASK 0x10000000 /* Transmit overrun */
#define XLLF_INT_TC_MASK 0x08000000 /* Transmit complete */
#define XLLF_INT_RC_MASK 0x04000000 /* Receive complete */
#define XLLF_INT_TSE_MASK 0x02000000 /* Transmit length mismatch */
#define XLLF_INT_TRC_MASK 0x01000000 /* Transmit reset complete */
#define XLLF_INT_RRC_MASK 0x00800000 /* Receive reset complete */
#define XLLF_INT_TFPF_MASK 0x00400000 /* Tx FIFO Programmable Full */
#define XLLF_INT_TFPE_MASK 0x00200000 /* Tx FIFO Programmable Empty */
#define XLLF_INT_RFPF_MASK 0x00100000 /* Rx FIFO Programmable Full */
#define XLLF_INT_RFPE_MASK 0x00080000 /* Rx FIFO Programmable Empty */
#define XLLF_INT_ALL_MASK 0xfff80000 /* All the ints */
#define XLLF_INT_ERROR_MASK 0xf2000000 /* Error status ints */
#define XLLF_INT_RXERROR_MASK 0xe0000000 /* Receive Error status ints */
#define XLLF_INT_TXERROR_MASK 0x12000000 /* Transmit Error status ints */

/* ----------------------------
 * globals
 * ----------------------------
 */

static struct class *axis_fifo_driver_class; /* char device class */

static int read_timeout = 1000; /* ms to wait before read() times out */
static int write_timeout = 1000; /* ms to wait before write() times out */
/* ----------------------------
 * module command-line arguments
 * ----------------------------
 */

module_param(read_timeout, int, 0444);
MODULE_PARM_DESC(read_timeout, "ms to wait before blocking read() timing out; set to -1 for no timeout");
module_param(write_timeout, int, 0444);
MODULE_PARM_DESC(write_timeout, "ms to wait before blocking write() timing out; set to -1 for no timeout");

/* ----------------------------
 * types
 * ----------------------------
 */

/* per-instance driver state; one per probed AXI-Stream FIFO IP core */
struct axis_fifo {
	int irq; /* interrupt */
	void __iomem *base_addr; /* kernel space memory */

	unsigned int rx_fifo_depth; /* max words in the receive fifo */
	unsigned int tx_fifo_depth; /* max words in the transmit fifo */
	int has_rx_fifo; /* whether the IP has the rx fifo enabled */
	int has_tx_fifo; /* whether the IP has the tx fifo enabled */

	wait_queue_head_t read_queue; /* wait queue for asynchronous read */
	struct mutex read_lock; /* lock for reading */
	wait_queue_head_t write_queue; /* wait queue for asynchronous write */
	struct mutex write_lock; /* lock for writing */
	unsigned int write_flags; /* write file flags */
	unsigned int read_flags; /* read file flags */

	struct device *dt_device; /* device created from the device tree */
	struct device *device; /* device associated with char_device */
	dev_t devt; /* our char device number */
	struct cdev char_device; /* our char device */
};

/* ----------------------------
 * sysfs entries
 * ----------------------------
 */

/*
 * Parse a user-supplied number (any base kstrtoul accepts) and write it to
 * the IP register at @addr_offset.  Returns @count on success or the
 * kstrtoul error code on bad input.  Note the value is silently truncated
 * to 32 bits by iowrite32().
 */
static ssize_t sysfs_write(struct device *dev, const char *buf,
			   size_t count, unsigned int addr_offset)
{
	struct axis_fifo *fifo = dev_get_drvdata(dev);
	unsigned long tmp;
	int rc;

	rc = kstrtoul(buf, 0, &tmp);
	if (rc < 0)
		return rc;

	iowrite32(tmp, fifo->base_addr + addr_offset);

	return count;
}
/*
 * Read the IP register at @addr_offset and render it into the sysfs
 * buffer as "0x<hex>\n".  Returns the number of bytes written to @buf.
 */
static ssize_t sysfs_read(struct device *dev, char *buf,
			  unsigned int addr_offset)
{
	struct axis_fifo *fifo = dev_get_drvdata(dev);
	unsigned int read_val;
	unsigned int len;
	char tmp[32];

	read_val = ioread32(fifo->base_addr + addr_offset);
	len = snprintf(tmp, sizeof(tmp), "0x%x\n", read_val);
	memcpy(buf, tmp, len);

	return len;
}

/*
 * Per-register sysfs hooks: each attribute below maps one file in the
 * "ip_registers" group directly onto one IP register, using the
 * sysfs_read()/sysfs_write() helpers above.  Read-only vs write-only
 * matches the register's hardware access (see PG080).
 */
static ssize_t isr_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_ISR_OFFSET);
}

static ssize_t isr_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_ISR_OFFSET);
}

static DEVICE_ATTR_RW(isr);

static ssize_t ier_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_IER_OFFSET);
}

static ssize_t ier_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_IER_OFFSET);
}

static DEVICE_ATTR_RW(ier);

static ssize_t tdfr_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_TDFR_OFFSET);
}

static DEVICE_ATTR_WO(tdfr);

static ssize_t tdfv_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_TDFV_OFFSET);
}

static DEVICE_ATTR_RO(tdfv);

static ssize_t tdfd_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_TDFD_OFFSET);
}

static DEVICE_ATTR_WO(tdfd);

static ssize_t tlr_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_TLR_OFFSET);
}

static DEVICE_ATTR_WO(tlr);

static ssize_t rdfr_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_RDFR_OFFSET);
}

static DEVICE_ATTR_WO(rdfr);

static ssize_t rdfo_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_RDFO_OFFSET);
}

static DEVICE_ATTR_RO(rdfo);

static ssize_t rdfd_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_RDFD_OFFSET);
}

static DEVICE_ATTR_RO(rdfd);

static ssize_t rlr_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_RLR_OFFSET);
}

static DEVICE_ATTR_RO(rlr);

static ssize_t srr_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_SRR_OFFSET);
}

static DEVICE_ATTR_WO(srr);

static ssize_t tdr_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_TDR_OFFSET);
}

static DEVICE_ATTR_WO(tdr);

static ssize_t rdr_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_RDR_OFFSET);
}

static DEVICE_ATTR_RO(rdr);

static struct attribute *axis_fifo_attrs[] = {
	&dev_attr_isr.attr,
	&dev_attr_ier.attr,
	&dev_attr_tdfr.attr,
	&dev_attr_tdfv.attr,
	&dev_attr_tdfd.attr,
	&dev_attr_tlr.attr,
	&dev_attr_rdfr.attr,
	&dev_attr_rdfo.attr,
	&dev_attr_rdfd.attr,
	&dev_attr_rlr.attr,
	&dev_attr_srr.attr,
	&dev_attr_tdr.attr,
	&dev_attr_rdr.attr,
	NULL,
};

static const struct attribute_group axis_fifo_attrs_group = {
	.name = "ip_registers",
	.attrs = axis_fifo_attrs,
};
/* ----------------------------
 * implementation
 * ----------------------------
 */

/*
 * Reset the whole core, then both datapath FIFOs, then enable the
 * completion and error interrupts and acknowledge anything pending.
 */
static void reset_ip_core(struct axis_fifo *fifo)
{
	iowrite32(XLLF_SRR_RESET_MASK, fifo->base_addr + XLLF_SRR_OFFSET);
	iowrite32(XLLF_TDFR_RESET_MASK, fifo->base_addr + XLLF_TDFR_OFFSET);
	iowrite32(XLLF_RDFR_RESET_MASK, fifo->base_addr + XLLF_RDFR_OFFSET);
	iowrite32(XLLF_INT_TC_MASK | XLLF_INT_RC_MASK | XLLF_INT_RPURE_MASK |
		  XLLF_INT_RPORE_MASK | XLLF_INT_RPUE_MASK |
		  XLLF_INT_TPOE_MASK | XLLF_INT_TSE_MASK,
		  fifo->base_addr + XLLF_IER_OFFSET);
	iowrite32(XLLF_INT_ALL_MASK, fifo->base_addr + XLLF_ISR_OFFSET);
}

/**
 * axis_fifo_read() - Read a packet from AXIS-FIFO character device.
 * @f: Open file.
 * @buf: User space buffer to read to.
 * @len: User space buffer length.
 * @off: Buffer offset.
 *
 * As defined by the device's documentation, we need to check the device's
 * occupancy before reading the length register and then the data. All these
 * operations must be executed atomically, in order and one after the other
 * without missing any.
 *
 * Returns the number of bytes read from the device or negative error code
 * on failure.
 */
static ssize_t axis_fifo_read(struct file *f, char __user *buf,
			      size_t len, loff_t *off)
{
	struct axis_fifo *fifo = (struct axis_fifo *)f->private_data;
	size_t bytes_available;
	unsigned int words_available;
	unsigned int copied;
	unsigned int copy;
	unsigned int i;
	int ret;
	u32 tmp_buf[READ_BUF_SIZE];

	if (fifo->read_flags & O_NONBLOCK) {
		/*
		 * Device opened in non-blocking mode. Try to lock it and then
		 * check if any packet is available.
		 */
		if (!mutex_trylock(&fifo->read_lock))
			return -EAGAIN;

		/* RDFO == 0 means no complete packet is available yet */
		if (!ioread32(fifo->base_addr + XLLF_RDFO_OFFSET)) {
			ret = -EAGAIN;
			goto end_unlock;
		}
	} else {
		/* opened in blocking mode
		 * wait for a packet available interrupt (or timeout)
		 * if nothing is currently available
		 */
		mutex_lock(&fifo->read_lock);
		ret = wait_event_interruptible_timeout(fifo->read_queue,
			ioread32(fifo->base_addr + XLLF_RDFO_OFFSET),
			(read_timeout >= 0) ? msecs_to_jiffies(read_timeout) :
				MAX_SCHEDULE_TIMEOUT);

		/* ret == 0 is a timeout; < 0 is a signal or a real error */
		if (ret <= 0) {
			if (ret == 0) {
				ret = -EAGAIN;
			} else if (ret != -ERESTARTSYS) {
				dev_err(fifo->dt_device, "wait_event_interruptible_timeout() error in read (ret=%i)\n",
					ret);
			}

			goto end_unlock;
		}
	}

	/* RLR reports the length in bytes of the packet at the head */
	bytes_available = ioread32(fifo->base_addr + XLLF_RLR_OFFSET);
	if (!bytes_available) {
		dev_err(fifo->dt_device, "received a packet of length 0 - fifo core will be reset\n");
		reset_ip_core(fifo);
		ret = -EIO;
		goto end_unlock;
	}

	if (bytes_available > len) {
		dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu) - fifo core will be reset\n",
			bytes_available, len);
		reset_ip_core(fifo);
		ret = -EINVAL;
		goto end_unlock;
	}

	if (bytes_available % sizeof(u32)) {
		/* this probably can't happen unless IP
		 * registers were previously mishandled
		 */
		dev_err(fifo->dt_device, "received a packet that isn't word-aligned - fifo core will be reset\n");
		reset_ip_core(fifo);
		ret = -EIO;
		goto end_unlock;
	}

	words_available = bytes_available / sizeof(u32);

	/* read data into an intermediate buffer, copying the contents
	 * to userspace when the buffer is full
	 */
	copied = 0;
	while (words_available > 0) {
		copy = min(words_available, READ_BUF_SIZE);

		/* each RDFD read pops one word from the receive FIFO */
		for (i = 0; i < copy; i++) {
			tmp_buf[i] = ioread32(fifo->base_addr +
					      XLLF_RDFD_OFFSET);
		}

		if (copy_to_user(buf + copied * sizeof(u32), tmp_buf,
				 copy * sizeof(u32))) {
			reset_ip_core(fifo);
			ret = -EFAULT;
			goto end_unlock;
		}

		copied += copy;
		words_available -= copy;
	}

	ret = bytes_available;

end_unlock:
	mutex_unlock(&fifo->read_lock);

	return ret;
}

/**
 * axis_fifo_write() - Write buffer to AXIS-FIFO character device.
 * @f: Open file.
 * @buf: User space buffer to write to the device.
 * @len: User space buffer length.
 * @off: Buffer offset.
 *
 * As defined by the device's documentation, we need to write to the device's
 * data buffer then to the device's packet length register atomically. Also,
 * we need to lock before checking if the device has available space to avoid
 * any concurrency issue.
 *
 * Returns the number of bytes written to the device or negative error code
 * on failure.
 */
static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
			       size_t len, loff_t *off)
{
	struct axis_fifo *fifo = (struct axis_fifo *)f->private_data;
	unsigned int words_to_write;
	unsigned int copied;
	unsigned int copy;
	unsigned int i;
	int ret;
	u32 tmp_buf[WRITE_BUF_SIZE];

	/* the core only transfers whole 32-bit words */
	if (len % sizeof(u32)) {
		dev_err(fifo->dt_device,
			"tried to send a packet that isn't word-aligned\n");
		return -EINVAL;
	}

	words_to_write = len / sizeof(u32);

	if (!words_to_write) {
		dev_err(fifo->dt_device,
			"tried to send a packet of length 0\n");
		return -EINVAL;
	}

	if (words_to_write > fifo->tx_fifo_depth) {
		dev_err(fifo->dt_device, "tried to write more words [%u] than slots in the fifo buffer [%u]\n",
			words_to_write, fifo->tx_fifo_depth);
		return -EINVAL;
	}

	if (fifo->write_flags & O_NONBLOCK) {
		/*
		 * Device opened in non-blocking mode. Try to lock it and then
		 * check if there is any room to write the given buffer.
		 */
		if (!mutex_trylock(&fifo->write_lock))
			return -EAGAIN;

		/* TDFV reports free word slots in the transmit FIFO */
		if (words_to_write > ioread32(fifo->base_addr +
					      XLLF_TDFV_OFFSET)) {
			ret = -EAGAIN;
			goto end_unlock;
		}
	} else {
		/* opened in blocking mode */

		/* wait for an interrupt (or timeout) if there isn't
		 * currently enough room in the fifo
		 */
		mutex_lock(&fifo->write_lock);
		ret = wait_event_interruptible_timeout(fifo->write_queue,
			ioread32(fifo->base_addr + XLLF_TDFV_OFFSET)
				 >= words_to_write,
			(write_timeout >= 0) ? msecs_to_jiffies(write_timeout) :
				MAX_SCHEDULE_TIMEOUT);

		/* ret == 0 is a timeout; < 0 is a signal or a real error */
		if (ret <= 0) {
			if (ret == 0) {
				ret = -EAGAIN;
			} else if (ret != -ERESTARTSYS) {
				dev_err(fifo->dt_device, "wait_event_interruptible_timeout() error in write (ret=%i)\n",
					ret);
			}

			goto end_unlock;
		}
	}

	/* write data from an intermediate buffer into the fifo IP, refilling
	 * the buffer with userspace data as needed
	 */
	copied = 0;
	while (words_to_write > 0) {
		copy = min(words_to_write, WRITE_BUF_SIZE);

		if (copy_from_user(tmp_buf, buf + copied * sizeof(u32),
				   copy * sizeof(u32))) {
			reset_ip_core(fifo);
			ret = -EFAULT;
			goto end_unlock;
		}

		/* each TDFD write pushes one word into the transmit FIFO */
		for (i = 0; i < copy; i++)
			iowrite32(tmp_buf[i], fifo->base_addr +
				  XLLF_TDFD_OFFSET);

		copied += copy;
		words_to_write -= copy;
	}

	ret = copied * sizeof(u32);

	/* write packet size to fifo */
	iowrite32(ret, fifo->base_addr + XLLF_TLR_OFFSET);

end_unlock:
	mutex_unlock(&fifo->write_lock);

	return ret;
}

/*
 * Interrupt handler: loop until no enabled interrupt is pending,
 * acknowledging each source in ISR and waking blocked readers/writers
 * on packet-received / transmit-complete.
 */
static irqreturn_t axis_fifo_irq(int irq, void *dw)
{
	struct axis_fifo *fifo = (struct axis_fifo *)dw;
	unsigned int pending_interrupts;

	do {
		/* only consider sources that are both enabled and asserted */
		pending_interrupts = ioread32(fifo->base_addr +
					      XLLF_IER_OFFSET) &
				     ioread32(fifo->base_addr
					      + XLLF_ISR_OFFSET);
		if (pending_interrupts & XLLF_INT_RC_MASK) {
			/* packet received */

			/* wake the reader process if it is waiting */
			wake_up(&fifo->read_queue);

			/* clear interrupt */
			iowrite32(XLLF_INT_RC_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TC_MASK) {
			/* packet sent */

			/* wake the writer process if it is waiting */
			wake_up(&fifo->write_queue);

			iowrite32(XLLF_INT_TC_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TFPF_MASK) {
			/* transmit fifo programmable full */

			iowrite32(XLLF_INT_TFPF_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TFPE_MASK) {
			/* transmit fifo programmable empty */

			iowrite32(XLLF_INT_TFPE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RFPF_MASK) {
			/* receive fifo programmable full */

			iowrite32(XLLF_INT_RFPF_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RFPE_MASK) {
			/* receive fifo programmable empty */

			iowrite32(XLLF_INT_RFPE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TRC_MASK) {
			/* transmit reset complete interrupt */

			iowrite32(XLLF_INT_TRC_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RRC_MASK) {
			/* receive reset complete interrupt */

			iowrite32(XLLF_INT_RRC_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RPURE_MASK) {
			/* receive fifo under-read error interrupt */
			dev_err(fifo->dt_device,
				"receive under-read interrupt\n");

			iowrite32(XLLF_INT_RPURE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RPORE_MASK) {
			/* receive over-read error interrupt */
			dev_err(fifo->dt_device,
				"receive over-read interrupt\n");

			iowrite32(XLLF_INT_RPORE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RPUE_MASK) {
			/* receive underrun error interrupt */
			dev_err(fifo->dt_device,
				"receive underrun error interrupt\n");

			iowrite32(XLLF_INT_RPUE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TPOE_MASK) {
			/* transmit overrun error interrupt */
			dev_err(fifo->dt_device,
				"transmit overrun error interrupt\n");

			iowrite32(XLLF_INT_TPOE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TSE_MASK) {
			/* transmit length mismatch error interrupt */
			dev_err(fifo->dt_device,
				"transmit length mismatch error interrupt\n");

			iowrite32(XLLF_INT_TSE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts) {
			/* unknown interrupt type */
			dev_err(fifo->dt_device,
				"unknown interrupt(s) 0x%x\n",
				pending_interrupts);

			iowrite32(XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		}
	} while (pending_interrupts);

	return IRQ_HANDLED;
}

/*
 * open(): reject access modes that the synthesized IP cannot serve
 * (no tx fifo -> no writing, no rx fifo -> no reading) and stash the
 * open flags so read()/write() can honor O_NONBLOCK.
 */
static int axis_fifo_open(struct inode *inod, struct file *f)
{
	struct axis_fifo *fifo = (struct axis_fifo *)container_of(inod->i_cdev,
					struct axis_fifo, char_device);
	f->private_data = fifo;

	if (((f->f_flags & O_ACCMODE) == O_WRONLY) ||
	    ((f->f_flags & O_ACCMODE) == O_RDWR)) {
		if (fifo->has_tx_fifo) {
			fifo->write_flags = f->f_flags;
		} else {
			dev_err(fifo->dt_device, "tried to open device for write but the transmit fifo is disabled\n");
			return -EPERM;
		}
	}

	if (((f->f_flags & O_ACCMODE) == O_RDONLY) ||
	    ((f->f_flags & O_ACCMODE) == O_RDWR)) {
		if (fifo->has_rx_fifo) {
			fifo->read_flags = f->f_flags;
		} else {
			dev_err(fifo->dt_device, "tried to open device for read but the receive fifo is disabled\n");
			return -EPERM;
		}
	}

	return 0;
}
/* release(): nothing to tear down beyond clearing our back-pointer */
static int axis_fifo_close(struct inode *inod, struct file *f)
{
	f->private_data = NULL;

	return 0;
}

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = axis_fifo_open,
	.release = axis_fifo_close,
	.read = axis_fifo_read,
	.write = axis_fifo_write
};

/* read named property from the device tree */
static int get_dts_property(struct axis_fifo *fifo,
			    char *name, unsigned int *var)
{
	int rc;

	rc = of_property_read_u32(fifo->dt_device->of_node, name, var);
	if (rc) {
		dev_err(fifo->dt_device, "couldn't read IP dts property '%s'",
			name);
		return rc;
	}
	dev_dbg(fifo->dt_device, "dts property '%s' = %u\n",
		name, *var);

	return 0;
}

/*
 * Pull the IP configuration out of the device tree: both stream widths
 * must be 32 bits (the only width this driver supports), and the fifo
 * depths / enable flags populate the axis_fifo state.
 */
static int axis_fifo_parse_dt(struct axis_fifo *fifo)
{
	int ret;
	unsigned int value;

	ret = get_dts_property(fifo, "xlnx,axi-str-rxd-tdata-width", &value);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,axi-str-rxd-tdata-width property\n");
		goto end;
	} else if (value != 32) {
		dev_err(fifo->dt_device, "xlnx,axi-str-rxd-tdata-width only supports 32 bits\n");
		ret = -EIO;
		goto end;
	}

	ret = get_dts_property(fifo, "xlnx,axi-str-txd-tdata-width", &value);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,axi-str-txd-tdata-width property\n");
		goto end;
	} else if (value != 32) {
		dev_err(fifo->dt_device, "xlnx,axi-str-txd-tdata-width only supports 32 bits\n");
		ret = -EIO;
		goto end;
	}

	ret = get_dts_property(fifo, "xlnx,rx-fifo-depth",
			       &fifo->rx_fifo_depth);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,rx-fifo-depth property\n");
		ret = -EIO;
		goto end;
	}

	ret = get_dts_property(fifo, "xlnx,tx-fifo-depth",
			       &fifo->tx_fifo_depth);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,tx-fifo-depth property\n");
		ret = -EIO;
		goto end;
	}

	/* IP sets TDFV to fifo depth - 4 so we will do the same */
	fifo->tx_fifo_depth -= 4;

	ret = get_dts_property(fifo, "xlnx,use-rx-data", &fifo->has_rx_fifo);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,use-rx-data property\n");
		ret = -EIO;
		goto end;
	}

	ret = get_dts_property(fifo, "xlnx,use-tx-data", &fifo->has_tx_fifo);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,use-tx-data property\n");
		ret = -EIO;
		goto end;
	}

end:
	return ret;
}
"missing xlnx,rx-fifo-depth property\n"); 774 ret = -EIO; 775 goto end; 776 } 777 778 ret = get_dts_property(fifo, "xlnx,tx-fifo-depth", 779 &fifo->tx_fifo_depth); 780 if (ret) { 781 dev_err(fifo->dt_device, "missing xlnx,tx-fifo-depth property\n"); 782 ret = -EIO; 783 goto end; 784 } 785 786 /* IP sets TDFV to fifo depth - 4 so we will do the same */ 787 fifo->tx_fifo_depth -= 4; 788 789 ret = get_dts_property(fifo, "xlnx,use-rx-data", &fifo->has_rx_fifo); 790 if (ret) { 791 dev_err(fifo->dt_device, "missing xlnx,use-rx-data property\n"); 792 ret = -EIO; 793 goto end; 794 } 795 796 ret = get_dts_property(fifo, "xlnx,use-tx-data", &fifo->has_tx_fifo); 797 if (ret) { 798 dev_err(fifo->dt_device, "missing xlnx,use-tx-data property\n"); 799 ret = -EIO; 800 goto end; 801 } 802 803 end: 804 return ret; 805 } 806 807 static int axis_fifo_probe(struct platform_device *pdev) 808 { 809 struct resource *r_irq; /* interrupt resources */ 810 struct resource *r_mem; /* IO mem resources */ 811 struct device *dev = &pdev->dev; /* OS device (from device tree) */ 812 struct axis_fifo *fifo = NULL; 813 814 char device_name[32]; 815 816 int rc = 0; /* error return value */ 817 818 /* ---------------------------- 819 * init wrapper device 820 * ---------------------------- 821 */ 822 823 /* allocate device wrapper memory */ 824 fifo = devm_kmalloc(dev, sizeof(*fifo), GFP_KERNEL); 825 if (!fifo) 826 return -ENOMEM; 827 828 dev_set_drvdata(dev, fifo); 829 fifo->dt_device = dev; 830 831 init_waitqueue_head(&fifo->read_queue); 832 init_waitqueue_head(&fifo->write_queue); 833 834 mutex_init(&fifo->read_lock); 835 mutex_init(&fifo->write_lock); 836 837 /* ---------------------------- 838 * init device memory space 839 * ---------------------------- 840 */ 841 842 /* get iospace for the device */ 843 r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 844 if (!r_mem) { 845 dev_err(fifo->dt_device, "invalid address\n"); 846 rc = -ENODEV; 847 goto err_initial; 848 } 849 850 /* request 
physical memory */ 851 fifo->base_addr = devm_ioremap_resource(fifo->dt_device, r_mem); 852 if (IS_ERR(fifo->base_addr)) { 853 rc = PTR_ERR(fifo->base_addr); 854 dev_err(fifo->dt_device, "can't remap IO resource (%d)\n", rc); 855 goto err_initial; 856 } 857 858 dev_dbg(fifo->dt_device, "remapped memory to 0x%p\n", fifo->base_addr); 859 860 /* create unique device name */ 861 snprintf(device_name, sizeof(device_name), "%s_%pa", 862 DRIVER_NAME, &r_mem->start); 863 864 dev_dbg(fifo->dt_device, "device name [%s]\n", device_name); 865 866 /* ---------------------------- 867 * init IP 868 * ---------------------------- 869 */ 870 871 rc = axis_fifo_parse_dt(fifo); 872 if (rc) 873 goto err_initial; 874 875 reset_ip_core(fifo); 876 877 /* ---------------------------- 878 * init device interrupts 879 * ---------------------------- 880 */ 881 882 /* get IRQ resource */ 883 r_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 884 if (!r_irq) { 885 dev_err(fifo->dt_device, "no IRQ found for 0x%pa\n", 886 &r_mem->start); 887 rc = -EIO; 888 goto err_initial; 889 } 890 891 /* request IRQ */ 892 fifo->irq = r_irq->start; 893 rc = devm_request_irq(fifo->dt_device, fifo->irq, &axis_fifo_irq, 0, 894 DRIVER_NAME, fifo); 895 if (rc) { 896 dev_err(fifo->dt_device, "couldn't allocate interrupt %i\n", 897 fifo->irq); 898 goto err_initial; 899 } 900 901 /* ---------------------------- 902 * init char device 903 * ---------------------------- 904 */ 905 906 /* allocate device number */ 907 rc = alloc_chrdev_region(&fifo->devt, 0, 1, DRIVER_NAME); 908 if (rc < 0) 909 goto err_initial; 910 dev_dbg(fifo->dt_device, "allocated device number major %i minor %i\n", 911 MAJOR(fifo->devt), MINOR(fifo->devt)); 912 913 /* create driver file */ 914 fifo->device = device_create(axis_fifo_driver_class, NULL, fifo->devt, 915 NULL, device_name); 916 if (IS_ERR(fifo->device)) { 917 dev_err(fifo->dt_device, 918 "couldn't create driver file\n"); 919 rc = PTR_ERR(fifo->device); 920 goto err_chrdev_region; 
921 } 922 dev_set_drvdata(fifo->device, fifo); 923 924 /* create character device */ 925 cdev_init(&fifo->char_device, &fops); 926 rc = cdev_add(&fifo->char_device, fifo->devt, 1); 927 if (rc < 0) { 928 dev_err(fifo->dt_device, "couldn't create character device\n"); 929 goto err_dev; 930 } 931 932 /* create sysfs entries */ 933 rc = devm_device_add_group(fifo->device, &axis_fifo_attrs_group); 934 if (rc < 0) { 935 dev_err(fifo->dt_device, "couldn't register sysfs group\n"); 936 goto err_cdev; 937 } 938 939 dev_info(fifo->dt_device, "axis-fifo created at %pa mapped to 0x%pa, irq=%i, major=%i, minor=%i\n", 940 &r_mem->start, &fifo->base_addr, fifo->irq, 941 MAJOR(fifo->devt), MINOR(fifo->devt)); 942 943 return 0; 944 945 err_cdev: 946 cdev_del(&fifo->char_device); 947 err_dev: 948 device_destroy(axis_fifo_driver_class, fifo->devt); 949 err_chrdev_region: 950 unregister_chrdev_region(fifo->devt, 1); 951 err_initial: 952 dev_set_drvdata(dev, NULL); 953 return rc; 954 } 955 956 static int axis_fifo_remove(struct platform_device *pdev) 957 { 958 struct device *dev = &pdev->dev; 959 struct axis_fifo *fifo = dev_get_drvdata(dev); 960 961 cdev_del(&fifo->char_device); 962 dev_set_drvdata(fifo->device, NULL); 963 device_destroy(axis_fifo_driver_class, fifo->devt); 964 unregister_chrdev_region(fifo->devt, 1); 965 dev_set_drvdata(dev, NULL); 966 967 return 0; 968 } 969 970 static const struct of_device_id axis_fifo_of_match[] = { 971 { .compatible = "xlnx,axi-fifo-mm-s-4.1", }, 972 {}, 973 }; 974 MODULE_DEVICE_TABLE(of, axis_fifo_of_match); 975 976 static struct platform_driver axis_fifo_driver = { 977 .driver = { 978 .name = DRIVER_NAME, 979 .of_match_table = axis_fifo_of_match, 980 }, 981 .probe = axis_fifo_probe, 982 .remove = axis_fifo_remove, 983 }; 984 985 static int __init axis_fifo_init(void) 986 { 987 pr_info("axis-fifo driver loaded with parameters read_timeout = %i, write_timeout = %i\n", 988 read_timeout, write_timeout); 989 axis_fifo_driver_class = 
class_create(THIS_MODULE, DRIVER_NAME); 990 if (IS_ERR(axis_fifo_driver_class)) 991 return PTR_ERR(axis_fifo_driver_class); 992 return platform_driver_register(&axis_fifo_driver); 993 } 994 995 module_init(axis_fifo_init); 996 997 static void __exit axis_fifo_exit(void) 998 { 999 platform_driver_unregister(&axis_fifo_driver); 1000 class_destroy(axis_fifo_driver_class); 1001 } 1002 1003 module_exit(axis_fifo_exit); 1004 1005 MODULE_LICENSE("GPL"); 1006 MODULE_AUTHOR("Jacob Feder <jacobsfeder@gmail.com>"); 1007 MODULE_DESCRIPTION("Xilinx AXI-Stream FIFO v4.1 IP core driver"); 1008