/*
 * Common Flash Interface support:
 *   ST Advanced Architecture Command Set (ID 0x0020)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * 10/10/2000	Nicolas Pitre <nico@fluxnic.net>
 *	- completely revamped method functions so they are aware and
 *	  independent of the flash geometry (buswidth, interleave, etc.)
 *	- scalability vs code size is completely set at compile-time
 *	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 06/21/2002	Joern Engel <joern@wh.fh-wedel.de> and others
 *	- modified Intel Command Set 0x0001 to support ST Advanced Architecture
 *	  (command set 0x0020)
 *	- added a writev function
 * 07/13/2005	Joern Engel <joern@wh.fh-wedel.de>
 *	- Plugged memory leak in cfi_staa_writev().
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>


static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen);
static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_staa_sync (struct mtd_info *);
static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_staa_suspend (struct mtd_info *);
static void cfi_staa_resume (struct mtd_info *);

static void cfi_staa_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0020(struct map_info *, int);

static struct mtd_info *cfi_staa_setup (struct map_info *);

static struct mtd_chip_driver cfi_staa_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_staa_destroy,
	.name		= "cfi_cmdset_0020",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
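/*
 * Decode the optional-feature bits advertised in the vendor extended
 * query table.  Bits we do not recognise are still reported, so new
 * chip features show up in the log rather than being silently ignored.
 */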
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;

	printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	for (i=9; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n", i);
	}

	printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
}
#endif

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int i;

	if (cfi->cfi_mode) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
		if (!extp)
			return NULL;

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
			printk(KERN_ERR "  Unknown ST Microelectronics"
			       " Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);
			kfree(extp);
			return NULL;
		}

		/* Do some byteswapping if necessary */
		extp->FeatureSupport = cfi32_to_cpu(map, extp->FeatureSupport);
		extp->BlkStatusRegMask = cfi32_to_cpu(map,
						extp->BlkStatusRegMask);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;
	}

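	/*
	 * Seed per-chip timing estimates.  Only buffer_write_time is
	 * actually consulted in this driver (as a cfi_udelay() interval),
	 * and do_write_buffer() tunes it up or down depending on how long
	 * the previous buffer program really took.
	 */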
	for (i = 0; i < cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 128;
		cfi->chips[i].buffer_write_time = 128;
		cfi->chips[i].erase_time = 1024;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	return cfi_staa_setup(map);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0020);

static struct mtd_info *cfi_staa_setup(struct map_info *map)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	unsigned long offset = 0;
	int i, j;
	unsigned long devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	if (!mtd) {
		kfree(cfi->cmdset_priv);
		return NULL;
	}

	mtd->priv = map;
	mtd->type = MTD_NORFLASH;
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i = 0; i < cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;

		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j = 0; j < cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize) + offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		kfree(mtd->eraseregions);
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i = 0; i < mtd->numeraseregions; i++) {
		printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n",
		       i, (unsigned long long)mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

	/* Also select the correct geometry setup too */
	mtd->_erase = cfi_staa_erase_varsize;
	mtd->_read = cfi_staa_read;
	mtd->_write = cfi_staa_write_buffers;
	mtd->_writev = cfi_staa_writev;
	mtd->_sync = cfi_staa_sync;
	mtd->_lock = cfi_staa_lock;
	mtd->_unlock = cfi_staa_unlock;
	mtd->_suspend = cfi_staa_suspend;
	mtd->_resume = cfi_staa_resume;
	mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
	mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	map->fldrv = &cfi_staa_chipdrv;
	__module_get(THIS_MODULE);
	mtd->name = map->name;
	return mtd;
}


static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	map_word status, status_OK;
	unsigned long timeo;
	DECLARE_WAITQUEUE(wait, current);
	int suspended = 0;
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);
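	/*
	 * CMD(0x80) replicates status-register bit 7 (write state machine
	 * ready) across every device in the interleave, so the
	 * map_word_andequal() polls below only succeed once all chips in
	 * this bank report ready.
	 */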
	timeo = jiffies + HZ;
 retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * If it's in FL_ERASING state, suspend it and make it talk now.
	 */
	switch (chip->state) {
	case FL_ERASING:
		if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
			goto sleep; /* We don't support erase suspend */

		map_write(map, CMD(0xb0), cmd_addr);
		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode. Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), cmd_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		//printk("Erase suspending at 0x%lx\n", cmd_addr);
		for (;;) {
			status = map_read(map, cmd_addr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh */
				map_write(map, CMD(0xd0), cmd_addr);
				/* make sure we're in 'read status' mode */
				map_write(map, CMD(0x70), cmd_addr);
				chip->state = FL_ERASING;
				wake_up(&chip->wq);
				mutex_unlock(&chip->mutex);
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
		}

		suspended = 1;
		map_write(map, CMD(0xff), cmd_addr);
		chip->state = FL_READY;
		break;

#if 0
	case FL_WRITING:
		/* Not quite yet */
#endif

	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_addr);
		chip->state = FL_STATUS;
		/* fall through */

	case FL_STATUS:
		status = map_read(map, cmd_addr);
		if (map_word_andequal(map, status, status_OK, status_OK)) {
			map_write(map, CMD(0xff), cmd_addr);
			chip->state = FL_READY;
			break;
		}

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
	sleep:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	map_copy_from(map, buf, adr, len);

	if (suspended) {
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), cmd_addr);
		map_write(map, CMD(0x70), cmd_addr);
	}

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}
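/*
 * Split a read that may span several (interleaved sets of) chips into
 * per-chip calls to do_read_onechip(), advancing the destination buffer
 * as we go.
 */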
static int cfi_staa_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int do_write_buffer(struct map_info *map, struct flchip *chip,
			   unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	DECLARE_WAITQUEUE(wait, current);
	int wbufsize, z;

	/* M58LW064A requires bus alignment for buffer writes -- saw */
	if (adr & (map_bankwidth(map)-1))
		return -EINVAL;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:

#ifdef DEBUG_CFI_FEATURES
	printk("%s: chip->state[%d]\n", __func__, chip->state);
#endif
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * Later, we can actually think about interrupting it
	 * if it's in FL_ERASING state.
	 * Not just yet, though.
	 */
	switch (chip->state) {
	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 1 status[%lx]\n", __func__, map_read(map, cmd_adr).x[0]);
#endif
		/* fall through */

	case FL_STATUS:
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;
		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
			       status.x[0], map_read(map, cmd_adr).x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

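	/*
	 * Buffer write sequence (Intel/ST style): issue 0xe8 (Write to
	 * Buffer) at the buffer-aligned address and poll until the chip
	 * grants us the buffer, then write the word count minus one, the
	 * data itself, and finally 0xd0 (Confirm) to start the actual
	 * programming.
	 */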
	ENABLE_VPP(map);
	map_write(map, CMD(0xe8), cmd_adr);
	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);

		if (++z > 100) {
			/* Argh. Not ready for write to buffer */
			DISABLE_VPP(map);
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
			return -EIO;
		}
	}

	/* Write length of data to come */
	map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr);

	/* Write data */
	for (z = 0; z < len;
	     z += map_bankwidth(map), buf += map_bankwidth(map)) {
		map_word d;
		d = map_word_load(map, buf);
		map_write(map, d, adr+z);
	}
	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	mutex_unlock(&chip->mutex);
	cfi_udelay(chip->buffer_write_time);
	mutex_lock(&chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			/* clear status */
			map_write(map, CMD(0x50), cmd_adr);
			/* put back into read status register mode */
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		z++;
		mutex_lock(&chip->mutex);
	}

	/* Adapt the delay before the first poll: if no extra poll was
	   needed, shorten it; if more than one was, lengthen it. */
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	DISABLE_VPP(map);
	chip->state = FL_STATUS;

	/* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell'
	   or 'incorrect cmd' -- saw.  0x3a = SR.1 (locked) | SR.3 (VPP low)
	   | SR.4 (program error) | SR.5 (erase error). */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 2 status[%lx]\n", __func__, status.x[0]);
#endif
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		wake_up(&chip->wq);
		mutex_unlock(&chip->mutex);
		return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
	}
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}
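/*
 * Chunk a write so that no single do_write_buffer() call crosses a
 * write-buffer boundary; wbufsize is interleave * 2^MaxBufWriteSize
 * bytes, matching mtd->writebufsize set up in cfi_staa_setup().
 */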
static int cfi_staa_write_buffers(struct mtd_info *mtd, loff_t to,
				  size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

#ifdef DEBUG_CFI_FEATURES
	printk("%s: map_bankwidth(map)[%x]\n", __func__, map_bankwidth(map));
	printk("%s: chipnum[%x] wbufsize[%x]\n", __func__, chipnum, wbufsize);
	printk("%s: ofs[%lx] len[%zx]\n", __func__, ofs, len);
#endif

	/* Write buffer is worth it only if more than one word to write... */
	while (len > 0) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	return 0;
}

/*
 * Writev for ECC-Flashes is a little more complicated. We need to maintain
 * a small buffer for this.
 * XXX: If the buffer size is not a power of 2, this will break
 */
#define ECCBUF_SIZE (mtd->writesize)
#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
#define ECCBUF_MOD(x) ((x) & (ECCBUF_SIZE - 1))
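/*
 * ECCBUF_SIZE must be a power of two for these masks to work.  For
 * example, with writesize == 8: ECCBUF_DIV(13) == 8 is the aligned head
 * that can be written directly, and ECCBUF_MOD(13) == 5 is the tail
 * carried over into the bounce buffer for the next iovec.
 */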
static int
cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t totlen = 0, thislen;
	int ret = 0;
	size_t buflen = 0;
	char *buffer;

	if (!ECCBUF_SIZE) {
		/* We should fall back to a general writev implementation.
		 * Until that is written, just break.
		 */
		return -EIO;
	}
	buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		size_t elem_len = vecs[i].iov_len;
		void *elem_base = vecs[i].iov_base;

		if (!elem_len) /* FIXME: Might be unnecessary. Check that */
			continue;
		if (buflen) { /* cut off head */
			if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
				memcpy(buffer+buflen, elem_base, elem_len);
				buflen += elem_len;
				continue;
			}
			memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
			ret = mtd_write(mtd, to, ECCBUF_SIZE, &thislen,
					buffer);
			totlen += thislen;
			if (ret || thislen != ECCBUF_SIZE)
				goto write_error;
			elem_len -= thislen-buflen;
			elem_base += thislen-buflen;
			to += ECCBUF_SIZE;
		}
		if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
			ret = mtd_write(mtd, to, ECCBUF_DIV(elem_len),
					&thislen, elem_base);
			totlen += thislen;
			if (ret || thislen != ECCBUF_DIV(elem_len))
				goto write_error;
			to += thislen;
		}
		buflen = ECCBUF_MOD(elem_len); /* cut off tail */
		if (buflen) {
			memset(buffer, 0xff, ECCBUF_SIZE);
			/* The tail starts after the aligned part (if any);
			   don't rely on thislen, which is stale when no
			   aligned data was written for this vector */
			memcpy(buffer, elem_base + ECCBUF_DIV(elem_len), buflen);
		}
	}
	if (buflen) { /* flush last page, even if not full */
		/* This is sometimes intended behaviour, really */
		ret = mtd_write(mtd, to, buflen, &thislen, buffer);
		totlen += thislen;
		if (ret || thislen != buflen)
			goto write_error;
	}
 write_error:
	if (retlen)
		*retlen = totlen;
	kfree(buffer);
	return ret;
}


static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		/* fall through */

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

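	/*
	 * Erase sequence: 0x50 (Clear Status Register) first so stale
	 * error bits cannot mask this operation's result, then 0x20/0xd0
	 * (Block Erase + Confirm).  A block erase takes on the order of a
	 * second, hence the unconditional msleep(1000) below.
	 */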
	ENABLE_VPP(map);
	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;

	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ*20); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	DISABLE_VPP(map);
	ret = 0;

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned char chipstatus = status.x[0];

		if (!map_word_equal(map, status, CMD(chipstatus))) {
			int i, w;

			for (w = 0; w < map_words(map); w++) {
				for (i = 0; i < cfi_interleave(cfi); i++) {
					chipstatus |= status.x[w] >> (cfi->device_type * 8);
				}
			}
			printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
			       status.x[0], chipstatus);
		}
		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				chip->state = FL_STATUS;
				mutex_unlock(&chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
			ret = -EIO;
		}
	}

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return ret;
}
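/*
 * Walk the requested range region by region, erasing one block at a
 * time.  Both ends of the range must already be aligned to the erase
 * size in effect at the respective address; that is verified below
 * before any block is touched.
 */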
static int cfi_staa_erase_varsize(struct mtd_info *mtd,
				  struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	 */

	while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
		i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	 */

	if (instr->addr & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i < mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	 */
	i--;

	if ((instr->addr + instr->len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;

	i = first;

	while (len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		/* Advance to the next region when we cross its boundary */
		if (adr % (1 << cfi->chipshift) ==
		    (((unsigned long)regions[i].offset +
		      (regions[i].erasesize * regions[i].numblocks)) %
		     (1 << cfi->chipshift)))
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	return 0;
}

static void cfi_staa_sync(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i = 0; !ret && i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* fall through */
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}

static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		/* fall through */

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

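	/*
	 * Lock sequence: 0x60 (Set Block Lock Bit setup) followed by 0x01
	 * (confirm), addressed anywhere within the block being locked.
	 */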
	ENABLE_VPP(map);
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0x01), adr);
	chip->state = FL_LOCKING;

	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	if (ofs & (mtd->erasesize - 1))
		return -EINVAL;

	if (len & (mtd->erasesize - 1))
		return -EINVAL;

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	while (len) {

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("before lock: block status register is %x\n", cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("after lock: block status register is %x\n", cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		if (ret)
			return ret;

		adr += mtd->erasesize;
		len -= mtd->erasesize;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}
	return 0;
}

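/*
 * Unlocking uses 0x60 (setup) followed by 0xd0 (Clear Block Lock Bits
 * confirm).  On these parts that sequence appears to clear the lock
 * bits of the whole chip, which is why cfi_staa_unlock() below issues
 * it only once rather than once per block.
 */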
static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		/* fall through */

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_UNLOCKING;

	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

#ifdef DEBUG_LOCK_BITS
	{
		unsigned long temp_adr = adr;
		unsigned long temp_len = len;

		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		while (temp_len) {
			printk("before unlock %lx: block status register is %x\n", temp_adr, cfi_read_query(map, temp_adr+(2*ofs_factor)));
			temp_adr += mtd->erasesize;
			temp_len -= mtd->erasesize;
		}
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
	}
#endif

	ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	printk("after unlock: block status register is %x\n", cfi_read_query(map, adr+(2*ofs_factor)));
	cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

	return ret;
}

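/*
 * Power management: park every idle chip in FL_PM_SUSPENDED.  If any
 * chip is busy, back out, waking the chips already suspended, and
 * return -EAGAIN so the caller can retry later.
 */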
static int cfi_staa_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i = 0; !ret && i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* fall through */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}

static void cfi_staa_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), 0);
			chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		mutex_unlock(&chip->mutex);
	}
}

static void cfi_staa_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	kfree(cfi->cmdset_priv);
	kfree(cfi);
}

MODULE_LICENSE("GPL");