1 /* 2 * Copyright © 2009 - Maxim Levitsky 3 * driver for Ricoh xD readers 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License version 2 as 7 * published by the Free Software Foundation. 8 */ 9 10 #define DRV_NAME "r852" 11 #define pr_fmt(fmt) DRV_NAME ": " fmt 12 13 #include <linux/kernel.h> 14 #include <linux/module.h> 15 #include <linux/jiffies.h> 16 #include <linux/workqueue.h> 17 #include <linux/interrupt.h> 18 #include <linux/pci.h> 19 #include <linux/pci_ids.h> 20 #include <linux/delay.h> 21 #include <linux/slab.h> 22 #include <asm/byteorder.h> 23 #include <linux/sched.h> 24 #include "sm_common.h" 25 #include "r852.h" 26 27 28 static bool r852_enable_dma = 1; 29 module_param(r852_enable_dma, bool, S_IRUGO); 30 MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)"); 31 32 static int debug; 33 module_param(debug, int, S_IRUGO | S_IWUSR); 34 MODULE_PARM_DESC(debug, "Debug level (0-2)"); 35 36 /* read register */ 37 static inline uint8_t r852_read_reg(struct r852_device *dev, int address) 38 { 39 uint8_t reg = readb(dev->mmio + address); 40 return reg; 41 } 42 43 /* write register */ 44 static inline void r852_write_reg(struct r852_device *dev, 45 int address, uint8_t value) 46 { 47 writeb(value, dev->mmio + address); 48 } 49 50 51 /* read dword sized register */ 52 static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address) 53 { 54 uint32_t reg = le32_to_cpu(readl(dev->mmio + address)); 55 return reg; 56 } 57 58 /* write dword sized register */ 59 static inline void r852_write_reg_dword(struct r852_device *dev, 60 int address, uint32_t value) 61 { 62 writel(cpu_to_le32(value), dev->mmio + address); 63 } 64 65 /* returns pointer to our private structure */ 66 static inline struct r852_device *r852_get_dev(struct mtd_info *mtd) 67 { 68 struct nand_chip *chip = mtd_to_nand(mtd); 69 return nand_get_controller_data(chip); 70 } 71 72 73 /* check if controller supports dma */ 74 static void r852_dma_test(struct r852_device *dev) 75 { 76 dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) & 77 (R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2); 78 79 if (!dev->dma_usable) 80 message("Non dma capable device detected, dma disabled"); 81 82 if (!r852_enable_dma) { 83 message("disabling dma on user request"); 84 dev->dma_usable = 0; 85 } 86 } 87 88 /* 89 * Enable dma. Enables ether first or second stage of the DMA, 90 * Expects dev->dma_dir and dev->dma_state be set 91 */ 92 static void r852_dma_enable(struct r852_device *dev) 93 { 94 uint8_t dma_reg, dma_irq_reg; 95 96 /* Set up dma settings */ 97 dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS); 98 dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY); 99 100 if (dev->dma_dir) 101 dma_reg |= R852_DMA_READ; 102 103 if (dev->dma_state == DMA_INTERNAL) { 104 dma_reg |= R852_DMA_INTERNAL; 105 /* Precaution to make sure HW doesn't write */ 106 /* to random kernel memory */ 107 r852_write_reg_dword(dev, R852_DMA_ADDR, 108 cpu_to_le32(dev->phys_bounce_buffer)); 109 } else { 110 dma_reg |= R852_DMA_MEMORY; 111 r852_write_reg_dword(dev, R852_DMA_ADDR, 112 cpu_to_le32(dev->phys_dma_addr)); 113 } 114 115 /* Precaution: make sure write reached the device */ 116 r852_read_reg_dword(dev, R852_DMA_ADDR); 117 118 r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg); 119 120 /* Set dma irq */ 121 dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE); 122 r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 123 dma_irq_reg | 124 R852_DMA_IRQ_INTERNAL | 125 R852_DMA_IRQ_ERROR | 126 R852_DMA_IRQ_MEMORY); 127 } 128 129 /* 130 * Disable dma, called from the interrupt handler, which specifies 131 * success of the operation via 'error' argument 132 */ 133 static void r852_dma_done(struct r852_device *dev, int error) 134 { 135 WARN_ON(dev->dma_stage == 0); 136 137 r852_write_reg_dword(dev, R852_DMA_IRQ_STA, 138 r852_read_reg_dword(dev, R852_DMA_IRQ_STA)); 139 140 r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0); 141 r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0); 142 143 /* Precaution to make sure HW doesn't write to random kernel memory */ 144 r852_write_reg_dword(dev, R852_DMA_ADDR, 145 cpu_to_le32(dev->phys_bounce_buffer)); 146 r852_read_reg_dword(dev, R852_DMA_ADDR); 147 148 dev->dma_error = error; 149 dev->dma_stage = 0; 150 151 if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer) 152 dma_unmap_single(&dev->pci_dev->dev, dev->phys_dma_addr, 153 R852_DMA_LEN, 154 dev->dma_dir ? DMA_FROM_DEVICE : DMA_TO_DEVICE); 155 } 156 157 /* 158 * Wait, till dma is done, which includes both phases of it 159 */ 160 static int r852_dma_wait(struct r852_device *dev) 161 { 162 long timeout = wait_for_completion_timeout(&dev->dma_done, 163 msecs_to_jiffies(1000)); 164 if (!timeout) { 165 dbg("timeout waiting for DMA interrupt"); 166 return -ETIMEDOUT; 167 } 168 169 return 0; 170 } 171 172 /* 173 * Read/Write one page using dma. Only pages can be read (512 bytes) 174 */ 175 static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read) 176 { 177 int bounce = 0; 178 unsigned long flags; 179 int error; 180 181 dev->dma_error = 0; 182 183 /* Set dma direction */ 184 dev->dma_dir = do_read; 185 dev->dma_stage = 1; 186 reinit_completion(&dev->dma_done); 187 188 dbg_verbose("doing dma %s ", do_read ? "read" : "write"); 189 190 /* Set initial dma state: for reading first fill on board buffer, 191 from device, for writes first fill the buffer from memory*/ 192 dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY; 193 194 /* if incoming buffer is not page aligned, we should do bounce */ 195 if ((unsigned long)buf & (R852_DMA_LEN-1)) 196 bounce = 1; 197 198 if (!bounce) { 199 dev->phys_dma_addr = dma_map_single(&dev->pci_dev->dev, buf, 200 R852_DMA_LEN, 201 do_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE); 202 if (dma_mapping_error(&dev->pci_dev->dev, dev->phys_dma_addr)) 203 bounce = 1; 204 } 205 206 if (bounce) { 207 dbg_verbose("dma: using bounce buffer"); 208 dev->phys_dma_addr = dev->phys_bounce_buffer; 209 if (!do_read) 210 memcpy(dev->bounce_buffer, buf, R852_DMA_LEN); 211 } 212 213 /* Enable DMA */ 214 spin_lock_irqsave(&dev->irqlock, flags); 215 r852_dma_enable(dev); 216 spin_unlock_irqrestore(&dev->irqlock, flags); 217 218 /* Wait till complete */ 219 error = r852_dma_wait(dev); 220 221 if (error) { 222 r852_dma_done(dev, error); 223 return; 224 } 225 226 if (do_read && bounce) 227 memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN); 228 } 229 230 /* 231 * Program data lines of the nand chip to send data to it 232 */ 233 static void r852_write_buf(struct nand_chip *chip, const uint8_t *buf, int len) 234 { 235 struct r852_device *dev = r852_get_dev(nand_to_mtd(chip)); 236 uint32_t reg; 237 238 /* Don't allow any access to hardware if we suspect card removal */ 239 if (dev->card_unstable) 240 return; 241 242 /* Special case for whole sector read */ 243 if (len == R852_DMA_LEN && dev->dma_usable) { 244 r852_do_dma(dev, (uint8_t *)buf, 0); 245 return; 246 } 247 248 /* write DWORD chinks - faster */ 249 while (len >= 4) { 250 reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24; 251 r852_write_reg_dword(dev, R852_DATALINE, reg); 252 buf += 4; 253 len -= 4; 254 255 } 256 257 /* write rest */ 258 while (len > 0) { 259 r852_write_reg(dev, R852_DATALINE, *buf++); 260 len--; 261 } 262 } 263 264 /* 265 * Read data lines of the nand chip to retrieve data 266 */ 267 static void r852_read_buf(struct nand_chip *chip, uint8_t *buf, int len) 268 { 269 struct r852_device *dev = r852_get_dev(nand_to_mtd(chip)); 270 uint32_t reg; 271 272 if (dev->card_unstable) { 273 /* since we can't signal error here, at least, return 274 predictable buffer */ 275 memset(buf, 0, len); 276 return; 277 } 278 279 /* special case for whole sector read */ 280 if (len == R852_DMA_LEN && dev->dma_usable) { 281 r852_do_dma(dev, buf, 1); 282 return; 283 } 284 285 /* read in dword sized chunks */ 286 while (len >= 4) { 287 288 reg = r852_read_reg_dword(dev, R852_DATALINE); 289 *buf++ = reg & 0xFF; 290 *buf++ = (reg >> 8) & 0xFF; 291 *buf++ = (reg >> 16) & 0xFF; 292 *buf++ = (reg >> 24) & 0xFF; 293 len -= 4; 294 } 295 296 /* read the reset by bytes */ 297 while (len--) 298 *buf++ = r852_read_reg(dev, R852_DATALINE); 299 } 300 301 /* 302 * Read one byte from nand chip 303 */ 304 static uint8_t r852_read_byte(struct nand_chip *chip) 305 { 306 struct r852_device *dev = r852_get_dev(nand_to_mtd(chip)); 307 308 /* Same problem as in r852_read_buf.... */ 309 if (dev->card_unstable) 310 return 0; 311 312 return r852_read_reg(dev, R852_DATALINE); 313 } 314 315 /* 316 * Control several chip lines & send commands 317 */ 318 static void r852_cmdctl(struct nand_chip *chip, int dat, unsigned int ctrl) 319 { 320 struct r852_device *dev = r852_get_dev(nand_to_mtd(chip)); 321 322 if (dev->card_unstable) 323 return; 324 325 if (ctrl & NAND_CTRL_CHANGE) { 326 327 dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND | 328 R852_CTL_ON | R852_CTL_CARDENABLE); 329 330 if (ctrl & NAND_ALE) 331 dev->ctlreg |= R852_CTL_DATA; 332 333 if (ctrl & NAND_CLE) 334 dev->ctlreg |= R852_CTL_COMMAND; 335 336 if (ctrl & NAND_NCE) 337 dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON); 338 else 339 dev->ctlreg &= ~R852_CTL_WRITE; 340 341 /* when write is stareted, enable write access */ 342 if (dat == NAND_CMD_ERASE1) 343 dev->ctlreg |= R852_CTL_WRITE; 344 345 r852_write_reg(dev, R852_CTL, dev->ctlreg); 346 } 347 348 /* HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we need 349 to set write mode */ 350 if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) { 351 dev->ctlreg |= R852_CTL_WRITE; 352 r852_write_reg(dev, R852_CTL, dev->ctlreg); 353 } 354 355 if (dat != NAND_CMD_NONE) 356 r852_write_reg(dev, R852_DATALINE, dat); 357 } 358 359 /* 360 * Wait till card is ready. 361 * based on nand_wait, but returns errors on DMA error 362 */ 363 static int r852_wait(struct nand_chip *chip) 364 { 365 struct r852_device *dev = nand_get_controller_data(chip); 366 367 unsigned long timeout; 368 u8 status; 369 370 timeout = jiffies + msecs_to_jiffies(400); 371 372 while (time_before(jiffies, timeout)) 373 if (chip->legacy.dev_ready(chip)) 374 break; 375 376 nand_status_op(chip, &status); 377 378 /* Unfortunelly, no way to send detailed error status... */ 379 if (dev->dma_error) { 380 status |= NAND_STATUS_FAIL; 381 dev->dma_error = 0; 382 } 383 return status; 384 } 385 386 /* 387 * Check if card is ready 388 */ 389 390 static int r852_ready(struct nand_chip *chip) 391 { 392 struct r852_device *dev = r852_get_dev(nand_to_mtd(chip)); 393 return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY); 394 } 395 396 397 /* 398 * Set ECC engine mode 399 */ 400 401 static void r852_ecc_hwctl(struct nand_chip *chip, int mode) 402 { 403 struct r852_device *dev = r852_get_dev(nand_to_mtd(chip)); 404 405 if (dev->card_unstable) 406 return; 407 408 switch (mode) { 409 case NAND_ECC_READ: 410 case NAND_ECC_WRITE: 411 /* enable ecc generation/check*/ 412 dev->ctlreg |= R852_CTL_ECC_ENABLE; 413 414 /* flush ecc buffer */ 415 r852_write_reg(dev, R852_CTL, 416 dev->ctlreg | R852_CTL_ECC_ACCESS); 417 418 r852_read_reg_dword(dev, R852_DATALINE); 419 r852_write_reg(dev, R852_CTL, dev->ctlreg); 420 return; 421 422 case NAND_ECC_READSYN: 423 /* disable ecc generation */ 424 dev->ctlreg &= ~R852_CTL_ECC_ENABLE; 425 r852_write_reg(dev, R852_CTL, dev->ctlreg); 426 } 427 } 428 429 /* 430 * Calculate ECC, only used for writes 431 */ 432 433 static int r852_ecc_calculate(struct nand_chip *chip, const uint8_t *dat, 434 uint8_t *ecc_code) 435 { 436 struct r852_device *dev = r852_get_dev(nand_to_mtd(chip)); 437 struct sm_oob *oob = (struct sm_oob *)ecc_code; 438 uint32_t ecc1, ecc2; 439 440 if (dev->card_unstable) 441 return 0; 442 443 dev->ctlreg &= ~R852_CTL_ECC_ENABLE; 444 r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS); 445 446 ecc1 = r852_read_reg_dword(dev, R852_DATALINE); 447 ecc2 = r852_read_reg_dword(dev, R852_DATALINE); 448 449 oob->ecc1[0] = (ecc1) & 0xFF; 450 oob->ecc1[1] = (ecc1 >> 8) & 0xFF; 451 oob->ecc1[2] = (ecc1 >> 16) & 0xFF; 452 453 oob->ecc2[0] = (ecc2) & 0xFF; 454 oob->ecc2[1] = (ecc2 >> 8) & 0xFF; 455 oob->ecc2[2] = (ecc2 >> 16) & 0xFF; 456 457 r852_write_reg(dev, R852_CTL, dev->ctlreg); 458 return 0; 459 } 460 461 /* 462 * Correct the data using ECC, hw did almost everything for us 463 */ 464 465 static int r852_ecc_correct(struct nand_chip *chip, uint8_t *dat, 466 uint8_t *read_ecc, uint8_t *calc_ecc) 467 { 468 uint32_t ecc_reg; 469 uint8_t ecc_status, err_byte; 470 int i, error = 0; 471 472 struct r852_device *dev = r852_get_dev(nand_to_mtd(chip)); 473 474 if (dev->card_unstable) 475 return 0; 476 477 if (dev->dma_error) { 478 dev->dma_error = 0; 479 return -EIO; 480 } 481 482 r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS); 483 ecc_reg = r852_read_reg_dword(dev, R852_DATALINE); 484 r852_write_reg(dev, R852_CTL, dev->ctlreg); 485 486 for (i = 0 ; i <= 1 ; i++) { 487 488 ecc_status = (ecc_reg >> 8) & 0xFF; 489 490 /* ecc uncorrectable error */ 491 if (ecc_status & R852_ECC_FAIL) { 492 dbg("ecc: unrecoverable error, in half %d", i); 493 error = -EBADMSG; 494 goto exit; 495 } 496 497 /* correctable error */ 498 if (ecc_status & R852_ECC_CORRECTABLE) { 499 500 err_byte = ecc_reg & 0xFF; 501 dbg("ecc: recoverable error, " 502 "in half %d, byte %d, bit %d", i, 503 err_byte, ecc_status & R852_ECC_ERR_BIT_MSK); 504 505 dat[err_byte] ^= 506 1 << (ecc_status & R852_ECC_ERR_BIT_MSK); 507 error++; 508 } 509 510 dat += 256; 511 ecc_reg >>= 16; 512 } 513 exit: 514 return error; 515 } 516 517 /* 518 * This is copy of nand_read_oob_std 519 * nand_read_oob_syndrome assumes we can send column address - we can't 520 */ 521 static int r852_read_oob(struct nand_chip *chip, int page) 522 { 523 struct mtd_info *mtd = nand_to_mtd(chip); 524 525 return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize); 526 } 527 528 /* 529 * Start the nand engine 530 */ 531 532 static void r852_engine_enable(struct r852_device *dev) 533 { 534 if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) { 535 r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON); 536 r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED); 537 } else { 538 r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED); 539 r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON); 540 } 541 msleep(300); 542 r852_write_reg(dev, R852_CTL, 0); 543 } 544 545 546 /* 547 * Stop the nand engine 548 */ 549 550 static void r852_engine_disable(struct r852_device *dev) 551 { 552 r852_write_reg_dword(dev, R852_HW, 0); 553 r852_write_reg(dev, R852_CTL, R852_CTL_RESET); 554 } 555 556 /* 557 * Test if card is present 558 */ 559 560 static void r852_card_update_present(struct r852_device *dev) 561 { 562 unsigned long flags; 563 uint8_t reg; 564 565 spin_lock_irqsave(&dev->irqlock, flags); 566 reg = r852_read_reg(dev, R852_CARD_STA); 567 dev->card_detected = !!(reg & R852_CARD_STA_PRESENT); 568 spin_unlock_irqrestore(&dev->irqlock, flags); 569 } 570 571 /* 572 * Update card detection IRQ state according to current card state 573 * which is read in r852_card_update_present 574 */ 575 static void r852_update_card_detect(struct r852_device *dev) 576 { 577 int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE); 578 dev->card_unstable = 0; 579 580 card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT); 581 card_detect_reg |= R852_CARD_IRQ_GENABLE; 582 583 card_detect_reg |= dev->card_detected ? 584 R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT; 585 586 r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg); 587 } 588 589 static ssize_t r852_media_type_show(struct device *sys_dev, 590 struct device_attribute *attr, char *buf) 591 { 592 struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev); 593 struct r852_device *dev = r852_get_dev(mtd); 594 char *data = dev->sm ? "smartmedia" : "xd"; 595 596 strcpy(buf, data); 597 return strlen(data); 598 } 599 600 static DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL); 601 602 603 /* Detect properties of card in slot */ 604 static void r852_update_media_status(struct r852_device *dev) 605 { 606 uint8_t reg; 607 unsigned long flags; 608 int readonly; 609 610 spin_lock_irqsave(&dev->irqlock, flags); 611 if (!dev->card_detected) { 612 message("card removed"); 613 spin_unlock_irqrestore(&dev->irqlock, flags); 614 return ; 615 } 616 617 readonly = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO; 618 reg = r852_read_reg(dev, R852_DMA_CAP); 619 dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT); 620 621 message("detected %s %s card in slot", 622 dev->sm ? "SmartMedia" : "xD", 623 readonly ? "readonly" : "writeable"); 624 625 dev->readonly = readonly; 626 spin_unlock_irqrestore(&dev->irqlock, flags); 627 } 628 629 /* 630 * Register the nand device 631 * Called when the card is detected 632 */ 633 static int r852_register_nand_device(struct r852_device *dev) 634 { 635 struct mtd_info *mtd = nand_to_mtd(dev->chip); 636 637 WARN_ON(dev->card_registered); 638 639 mtd->dev.parent = &dev->pci_dev->dev; 640 641 if (dev->readonly) 642 dev->chip->options |= NAND_ROM; 643 644 r852_engine_enable(dev); 645 646 if (sm_register_device(mtd, dev->sm)) 647 goto error1; 648 649 if (device_create_file(&mtd->dev, &dev_attr_media_type)) { 650 message("can't create media type sysfs attribute"); 651 goto error3; 652 } 653 654 dev->card_registered = 1; 655 return 0; 656 error3: 657 nand_release(dev->chip); 658 error1: 659 /* Force card redetect */ 660 dev->card_detected = 0; 661 return -1; 662 } 663 664 /* 665 * Unregister the card 666 */ 667 668 static void r852_unregister_nand_device(struct r852_device *dev) 669 { 670 struct mtd_info *mtd = nand_to_mtd(dev->chip); 671 672 if (!dev->card_registered) 673 return; 674 675 device_remove_file(&mtd->dev, &dev_attr_media_type); 676 nand_release(dev->chip); 677 r852_engine_disable(dev); 678 dev->card_registered = 0; 679 } 680 681 /* Card state updater */ 682 static void r852_card_detect_work(struct work_struct *work) 683 { 684 struct r852_device *dev = 685 container_of(work, struct r852_device, card_detect_work.work); 686 687 r852_card_update_present(dev); 688 r852_update_card_detect(dev); 689 dev->card_unstable = 0; 690 691 /* False alarm */ 692 if (dev->card_detected == dev->card_registered) 693 goto exit; 694 695 /* Read media properties */ 696 r852_update_media_status(dev); 697 698 /* Register the card */ 699 if (dev->card_detected) 700 r852_register_nand_device(dev); 701 else 702 r852_unregister_nand_device(dev); 703 exit: 704 r852_update_card_detect(dev); 705 } 706 707 /* Ack + disable IRQ generation */ 708 static void r852_disable_irqs(struct r852_device *dev) 709 { 710 uint8_t reg; 711 reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE); 712 r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK); 713 714 reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE); 715 r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 716 reg & ~R852_DMA_IRQ_MASK); 717 718 r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK); 719 r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK); 720 } 721 722 /* Interrupt handler */ 723 static irqreturn_t r852_irq(int irq, void *data) 724 { 725 struct r852_device *dev = (struct r852_device *)data; 726 727 uint8_t card_status, dma_status; 728 unsigned long flags; 729 irqreturn_t ret = IRQ_NONE; 730 731 spin_lock_irqsave(&dev->irqlock, flags); 732 733 /* handle card detection interrupts first */ 734 card_status = r852_read_reg(dev, R852_CARD_IRQ_STA); 735 r852_write_reg(dev, R852_CARD_IRQ_STA, card_status); 736 737 if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) { 738 739 ret = IRQ_HANDLED; 740 dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT); 741 742 /* we shouldn't receive any interrupts if we wait for card 743 to settle */ 744 WARN_ON(dev->card_unstable); 745 746 /* disable irqs while card is unstable */ 747 /* this will timeout DMA if active, but better that garbage */ 748 r852_disable_irqs(dev); 749 750 if (dev->card_unstable) 751 goto out; 752 753 /* let, card state to settle a bit, and then do the work */ 754 dev->card_unstable = 1; 755 queue_delayed_work(dev->card_workqueue, 756 &dev->card_detect_work, msecs_to_jiffies(100)); 757 goto out; 758 } 759 760 761 /* Handle dma interrupts */ 762 dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA); 763 r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status); 764 765 if (dma_status & R852_DMA_IRQ_MASK) { 766 767 ret = IRQ_HANDLED; 768 769 if (dma_status & R852_DMA_IRQ_ERROR) { 770 dbg("received dma error IRQ"); 771 r852_dma_done(dev, -EIO); 772 complete(&dev->dma_done); 773 goto out; 774 } 775 776 /* received DMA interrupt out of nowhere? */ 777 WARN_ON_ONCE(dev->dma_stage == 0); 778 779 if (dev->dma_stage == 0) 780 goto out; 781 782 /* done device access */ 783 if (dev->dma_state == DMA_INTERNAL && 784 (dma_status & R852_DMA_IRQ_INTERNAL)) { 785 786 dev->dma_state = DMA_MEMORY; 787 dev->dma_stage++; 788 } 789 790 /* done memory DMA */ 791 if (dev->dma_state == DMA_MEMORY && 792 (dma_status & R852_DMA_IRQ_MEMORY)) { 793 dev->dma_state = DMA_INTERNAL; 794 dev->dma_stage++; 795 } 796 797 /* Enable 2nd half of dma dance */ 798 if (dev->dma_stage == 2) 799 r852_dma_enable(dev); 800 801 /* Operation done */ 802 if (dev->dma_stage == 3) { 803 r852_dma_done(dev, 0); 804 complete(&dev->dma_done); 805 } 806 goto out; 807 } 808 809 /* Handle unknown interrupts */ 810 if (dma_status) 811 dbg("bad dma IRQ status = %x", dma_status); 812 813 if (card_status & ~R852_CARD_STA_CD) 814 dbg("strange card status = %x", card_status); 815 816 out: 817 spin_unlock_irqrestore(&dev->irqlock, flags); 818 return ret; 819 } 820 821 static int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) 822 { 823 int error; 824 struct nand_chip *chip; 825 struct r852_device *dev; 826 827 /* pci initialization */ 828 error = pci_enable_device(pci_dev); 829 830 if (error) 831 goto error1; 832 833 pci_set_master(pci_dev); 834 835 error = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32)); 836 if (error) 837 goto error2; 838 839 error = pci_request_regions(pci_dev, DRV_NAME); 840 841 if (error) 842 goto error3; 843 844 error = -ENOMEM; 845 846 /* init nand chip, but register it only on card insert */ 847 chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL); 848 849 if (!chip) 850 goto error4; 851 852 /* commands */ 853 chip->legacy.cmd_ctrl = r852_cmdctl; 854 chip->legacy.waitfunc = r852_wait; 855 chip->legacy.dev_ready = r852_ready; 856 857 /* I/O */ 858 chip->legacy.read_byte = r852_read_byte; 859 chip->legacy.read_buf = r852_read_buf; 860 chip->legacy.write_buf = r852_write_buf; 861 862 /* ecc */ 863 chip->ecc.mode = NAND_ECC_HW_SYNDROME; 864 chip->ecc.size = R852_DMA_LEN; 865 chip->ecc.bytes = SM_OOB_SIZE; 866 chip->ecc.strength = 2; 867 chip->ecc.hwctl = r852_ecc_hwctl; 868 chip->ecc.calculate = r852_ecc_calculate; 869 chip->ecc.correct = r852_ecc_correct; 870 871 /* TODO: hack */ 872 chip->ecc.read_oob = r852_read_oob; 873 874 /* init our device structure */ 875 dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL); 876 877 if (!dev) 878 goto error5; 879 880 nand_set_controller_data(chip, dev); 881 dev->chip = chip; 882 dev->pci_dev = pci_dev; 883 pci_set_drvdata(pci_dev, dev); 884 885 dev->bounce_buffer = dma_alloc_coherent(&pci_dev->dev, R852_DMA_LEN, 886 &dev->phys_bounce_buffer, GFP_KERNEL); 887 888 if (!dev->bounce_buffer) 889 goto error6; 890 891 892 error = -ENODEV; 893 dev->mmio = pci_ioremap_bar(pci_dev, 0); 894 895 if (!dev->mmio) 896 goto error7; 897 898 error = -ENOMEM; 899 dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL); 900 901 if (!dev->tmp_buffer) 902 goto error8; 903 904 init_completion(&dev->dma_done); 905 906 dev->card_workqueue = create_freezable_workqueue(DRV_NAME); 907 908 if (!dev->card_workqueue) 909 goto error9; 910 911 INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work); 912 913 /* shutdown everything - precation */ 914 r852_engine_disable(dev); 915 r852_disable_irqs(dev); 916 917 r852_dma_test(dev); 918 919 dev->irq = pci_dev->irq; 920 spin_lock_init(&dev->irqlock); 921 922 dev->card_detected = 0; 923 r852_card_update_present(dev); 924 925 /*register irq handler*/ 926 error = -ENODEV; 927 if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED, 928 DRV_NAME, dev)) 929 goto error10; 930 931 /* kick initial present test */ 932 queue_delayed_work(dev->card_workqueue, 933 &dev->card_detect_work, 0); 934 935 936 pr_notice("driver loaded successfully\n"); 937 return 0; 938 939 error10: 940 destroy_workqueue(dev->card_workqueue); 941 error9: 942 kfree(dev->tmp_buffer); 943 error8: 944 pci_iounmap(pci_dev, dev->mmio); 945 error7: 946 dma_free_coherent(&pci_dev->dev, R852_DMA_LEN, dev->bounce_buffer, 947 dev->phys_bounce_buffer); 948 error6: 949 kfree(dev); 950 error5: 951 kfree(chip); 952 error4: 953 pci_release_regions(pci_dev); 954 error3: 955 error2: 956 pci_disable_device(pci_dev); 957 error1: 958 return error; 959 } 960 961 static void r852_remove(struct pci_dev *pci_dev) 962 { 963 struct r852_device *dev = pci_get_drvdata(pci_dev); 964 965 /* Stop detect workqueue - 966 we are going to unregister the device anyway*/ 967 cancel_delayed_work_sync(&dev->card_detect_work); 968 destroy_workqueue(dev->card_workqueue); 969 970 /* Unregister the device, this might make more IO */ 971 r852_unregister_nand_device(dev); 972 973 /* Stop interrupts */ 974 r852_disable_irqs(dev); 975 free_irq(dev->irq, dev); 976 977 /* Cleanup */ 978 kfree(dev->tmp_buffer); 979 pci_iounmap(pci_dev, dev->mmio); 980 dma_free_coherent(&pci_dev->dev, R852_DMA_LEN, dev->bounce_buffer, 981 dev->phys_bounce_buffer); 982 983 kfree(dev->chip); 984 kfree(dev); 985 986 /* Shutdown the PCI device */ 987 pci_release_regions(pci_dev); 988 pci_disable_device(pci_dev); 989 } 990 991 static void r852_shutdown(struct pci_dev *pci_dev) 992 { 993 struct r852_device *dev = pci_get_drvdata(pci_dev); 994 995 cancel_delayed_work_sync(&dev->card_detect_work); 996 r852_disable_irqs(dev); 997 synchronize_irq(dev->irq); 998 pci_disable_device(pci_dev); 999 } 1000 1001 #ifdef CONFIG_PM_SLEEP 1002 static int r852_suspend(struct device *device) 1003 { 1004 struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); 1005 1006 if (dev->ctlreg & R852_CTL_CARDENABLE) 1007 return -EBUSY; 1008 1009 /* First make sure the detect work is gone */ 1010 cancel_delayed_work_sync(&dev->card_detect_work); 1011 1012 /* Turn off the interrupts and stop the device */ 1013 r852_disable_irqs(dev); 1014 r852_engine_disable(dev); 1015 1016 /* If card was pulled off just during the suspend, which is very 1017 unlikely, we will remove it on resume, it too late now 1018 anyway... */ 1019 dev->card_unstable = 0; 1020 return 0; 1021 } 1022 1023 static int r852_resume(struct device *device) 1024 { 1025 struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); 1026 1027 r852_disable_irqs(dev); 1028 r852_card_update_present(dev); 1029 r852_engine_disable(dev); 1030 1031 1032 /* If card status changed, just do the work */ 1033 if (dev->card_detected != dev->card_registered) { 1034 dbg("card was %s during low power state", 1035 dev->card_detected ? "added" : "removed"); 1036 1037 queue_delayed_work(dev->card_workqueue, 1038 &dev->card_detect_work, msecs_to_jiffies(1000)); 1039 return 0; 1040 } 1041 1042 /* Otherwise, initialize the card */ 1043 if (dev->card_registered) { 1044 r852_engine_enable(dev); 1045 nand_select_target(dev->chip, 0); 1046 nand_reset_op(dev->chip); 1047 nand_deselect_target(dev->chip); 1048 } 1049 1050 /* Program card detection IRQ */ 1051 r852_update_card_detect(dev); 1052 return 0; 1053 } 1054 #endif 1055 1056 static const struct pci_device_id r852_pci_id_tbl[] = { 1057 1058 { PCI_VDEVICE(RICOH, 0x0852), }, 1059 { }, 1060 }; 1061 1062 MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl); 1063 1064 static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume); 1065 1066 static struct pci_driver r852_pci_driver = { 1067 .name = DRV_NAME, 1068 .id_table = r852_pci_id_tbl, 1069 .probe = r852_probe, 1070 .remove = r852_remove, 1071 .shutdown = r852_shutdown, 1072 .driver.pm = &r852_pm_ops, 1073 }; 1074 1075 module_pci_driver(r852_pci_driver); 1076 1077 MODULE_LICENSE("GPL"); 1078 MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>"); 1079 MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver"); 1080