/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 *
 * $Id: cfi_cmdset_0002.c,v 1.122 2005/11/07 11:14:22 gleixner Exp $
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define MANUFACTURER_AMD	0x0001
#define MANUFACTURER_ATMEL	0x001F
#define MANUFACTURER_SST	0x00BF
#define SST49LF004B		0x0060
#define SST49LF040B		0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};


/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char *erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char *top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);

	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}
	}
}
#endif

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n");
		mtd->write = cfi_amdstd_write_buffers;
	}
}

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	if (atmel_pri.BottomBoot)
		extp->TopBottom = 2;
	else
		extp->TopBottom = 3;

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
	/* Setup for chips with a secsi area */
	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->erase = cfi_amdstd_erase_chip;
	}
}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
{
	mtd->lock = cfi_atmel_lock;
	mtd->unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It is likely that the device ids are
	 * common as well.  This table picks up all the cases
	 * where we know that to be so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
	{ 0, 0, NULL, NULL }
};
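/*
 * Illustrative sketch (not part of this driver, kept out of the build):
 * a board port can run one extra fixup of its own through the same
 * mechanism.  cfi_fixup() walks a table like those above and invokes
 * every entry whose manufacturer/device IDs match the probed chip;
 * CFI_MFR_ANY and CFI_ID_ANY act as wildcards.  The function and table
 * names below are hypothetical.
 */
#if 0
static void fixup_my_board(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Example: force word writes on a board with a flaky buffer path */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
}

static struct cfi_fixup my_board_fixup_table[] = {
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_my_board, NULL },
	{ 0, 0, NULL, NULL }
};
/* ...and after probing: cfi_fixup(mtd, my_board_fixup_table); */
#endif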

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		unsigned char bootloc;
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure.  So we read the feature
		 * table from it.
		 */
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd *)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
			printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
			       "version %c.%c.\n", extp->MajorVersion,
			       extp->MinorVersion);
			kfree(extp);
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		/* Apply cfi device specific fixups */
		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		bootloc = extp->TopBottom;
		if ((bootloc != 2) && (bootloc != 3)) {
			printk(KERN_WARNING "%s: CFI does not contain boot "
			       "bank location. Assuming bottom.\n", map->name);
			bootloc = 2;
		}

		if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
			printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);

			for (i = 0; i < cfi->cfiq->NumEraseRegions / 2; i++) {
				int j = (cfi->cfiq->NumEraseRegions - 1) - i;
				__u32 swap;

				swap = cfi->cfiq->EraseRegionInfo[i];
				cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
				cfi->cfiq->EraseRegionInfo[j] = swap;
			}
		}
		/* Set the default CFI lock/unlock addresses */
		cfi->addr_unlock1 = 0x555;
		cfi->addr_unlock2 = 0x2aa;
		/* Modify the unlock address if we are in compatibility mode */
		if (	/* x16 in x8 mode */
		    ((cfi->device_type == CFI_DEVICETYPE_X8) &&
		     (cfi->cfiq->InterfaceDesc == CFI_INTERFACE_X8_BY_X16_ASYNC)) ||
			/* x32 in x16 mode */
		    ((cfi->device_type == CFI_DEVICETYPE_X16) &&
		     (cfi->cfiq->InterfaceDesc == CFI_INTERFACE_X16_BY_X32_ASYNC))) {
			cfi->addr_unlock1 = 0xaaa;
			cfi->addr_unlock2 = 0x555;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i = 0; i < cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1 << cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1 << cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1 << cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
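/*
 * Illustrative sketch (not built): cfi_cmdset_0002() is not called
 * directly by map drivers.  A map driver describes its flash window in
 * a struct map_info and lets the generic CFI probe identify the chip;
 * the probe dispatches here when the primary vendor command set ID is
 * 0x0002.  All names and values below are hypothetical examples.
 */
#if 0
static struct map_info example_map = {
	.name      = "example-flash",
	.phys      = 0xff000000,	/* example physical base */
	.size      = 0x00400000,	/* 4MiB window */
	.bankwidth = 2,			/* one x16 chip */
};

static int __init example_map_init(void)
{
	struct mtd_info *mtd;

	example_map.virt = ioremap(example_map.phys, example_map.size);
	if (!example_map.virt)
		return -EIO;
	simple_map_init(&example_map);

	mtd = do_map_probe("cfi_probe", &example_map);
	if (!mtd)
		return -ENXIO;
	mtd->owner = THIS_MODULE;
	return add_mtd_device(mtd) ? -ENODEV : 0;
}
#endif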
static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i, j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI) ? "CFI" : "JEDEC", cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i = 0; i < cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j = 0; j < cfi->numchips; j++) {
			mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].offset = (j * devsize) + offset;
			mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].erasesize = ersize;
			mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}
#if 0
	/* debug */
	for (i = 0; i < mtd->numeraseregions; i++) {
		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i, mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}
#endif

	/* FIXME: erase-suspend-program is broken.  See
	   http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
	printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if (mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}
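/*
 * Worked example (illustrative) for the EraseRegionInfo decoding in
 * cfi_amdstd_setup() above: the low 16 bits of each word hold the
 * number of blocks minus one, the upper bits hold the block size
 * divided by 256.  For a region of 127 x 64KiB blocks on a
 * non-interleaved map:
 *
 *	EraseRegionInfo = 0x0100007E
 *	ersize = ((0x0100007E >> 8) & ~0xff) * 1 = 0x10000	(64KiB)
 *	ernum  = (0x0100007E & 0xffff) + 1       = 127
 */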
/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return map_word_equal(map, oldd, curd) &&
	       map_word_equal(map, curd, expected);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				spin_unlock(chip->mutex);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
			goto sleep;

		if (!(   mode == FL_READY
		      || mode == FL_POINT
		      || !cfip
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased.  However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we're
				 * trying to use the erase-in-progress
				 * sector. */
				map_write(map, CMD(0x30), chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state
			   FL_ERASE_SUSPENDING.  So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend & 2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch (chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}
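/*
 * Illustrative sketch (not built): every flash operation below brackets
 * its work with the same get_chip()/put_chip() protocol.  This is the
 * skeleton, reduced to the locking; the function name is hypothetical.
 */
#if 0
static int example_oneop(struct map_info *map, struct flchip *chip,
			 unsigned long adr)
{
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* ...issue commands, poll with chip_ready(map, adr)... */

	put_chip(map, chip, adr);	/* resumes a suspended erase if any */
	spin_unlock(chip->mutex);
	return 0;
}
#endif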
#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Also, configuring MTD
 * CFI support for a single buswidth and a single interleave is recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and for pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the
 * remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    (chip->state == FL_ERASING && (extp->EraseSuspend & 2)) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0x30), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * an XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with
 * the same chip state as it was entered with.  This is why there is no
 * care for the presence of add_wait_queue() or schedule() calls from
 * within a couple of xip_disable()'d areas of code, like in
 * do_erase_oneblock for example.  The queueing and scheduling are always
 * happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif
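/*
 * Usage note (illustrative): both variants of UDELAY() above are used
 * the same way by the polling loops below, e.g. UDELAY(map, chip, adr, 1).
 * With CONFIG_MTD_XIP it busy-waits with interrupts masked and suspends
 * the erase when an interrupt is pending; without it, it simply drops
 * chip->mutex around cfi_udelay() so that other tasks may run.
 */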
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}


static int cfi_amdstd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1 << cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
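/*
 * Worked example (illustrative) for the offset split used in
 * cfi_amdstd_read() above.  With two 16MiB chips (cfi->chipshift = 24)
 * and from = 0x1234567:
 *
 *	chipnum = 0x1234567 >> 24       = 1
 *	ofs     = 0x1234567 - (1 << 24) = 0x234567
 *
 * A read crossing the chip boundary is clipped to
 * (1 << chipshift) - ofs bytes and continues on the next chip.
 */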
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	spin_lock(chip->mutex);

	if (chip->state != FL_READY) {
#if 0
		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if (signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	spin_unlock(chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	/* 8 secsi bytes per chip */
	chipnum = from >> 3;
	ofs = from & 7;

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> 3)
			thislen = (1 << 3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
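/*
 * Illustrative sketch (not built): once fixup_use_secsi() has been
 * applied, the 8 SecSi (security silicon) bytes of each chip are
 * reachable through the protection-register hooks.  The variables
 * below are hypothetical.
 */
#if 0
	u_char secsi[8];
	size_t retlen;
	int err;

	/* read the 8 factory-programmed SecSi bytes of chip 0 */
	err = mtd->read_fact_prot_reg(mtd, 0, sizeof(secsi), &retlen, secsi);
#endif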
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundreds of usec).  However, we
	 * should use the maximum timeout value given by the chip at probe
	 * time instead.  Unfortunately, struct flchip does not have a field
	 * for the maximum timeout, only for the typical one, which can be
	 * far too short depending on the conditions.  The ' + 1' is to
	 * avoid having a timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = (HZ / 1000) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	      __func__, adr, datum.x[0]);

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n", __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write.  Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues.  Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}
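/*
 * Command sequence summary (illustrative) for the word program in
 * do_write_oneword() above, in terms of the unlock addresses set up
 * in cfi_cmdset_0002():
 *
 *	0xAA -> addr_unlock1	(first unlock cycle)
 *	0x55 -> addr_unlock2	(second unlock cycle)
 *	0xA0 -> addr_unlock1	(program setup)
 *	datum -> target address
 *	then poll until the toggle bits stop (chip_ready/chip_good)
 */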
static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if (signal_pending(current))
				return -EINTR;
#endif
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs + chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map) - i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while (len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if (signal_pending(current))
				return -EINTR;
#endif
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
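/*
 * Worked example (illustrative) for the unaligned head handling in
 * cfi_amdstd_write_words() above, with map_bankwidth(map) = 2 and a
 * 3-byte write starting at ofs = 0x101:
 *
 *	bus_ofs = 0x101 & ~1 = 0x100
 *	i = 0x101 - 0x100    = 1
 *	n = min(3, 2 - 1)    = 1
 *
 * One byte is merged into the word read back from 0x100 and programmed
 * with do_write_oneword(); the remaining two bytes then form a single
 * aligned word write at 0x102.
 */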
/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout */
	unsigned long uWriteTimeout = (HZ / 1000) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	      __func__, adr, datum.x[0]);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	/*cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);*/

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while (z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write.  Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues.  Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures. */
	map_write(map, CMD(0xF0), chip->start);
	xip_enable(map, chip, adr);
	/* FIXME - should have reset delay before continuing */

	printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}
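/*
 * Command sequence summary (illustrative) for the buffered program in
 * do_write_buffer() above:
 *
 *	0xAA -> addr_unlock1		(first unlock cycle)
 *	0x55 -> addr_unlock2		(second unlock cycle)
 *	0x25 -> sector address		(Write Buffer Load)
 *	word count - 1 -> sector address
 *	each data word -> its destination address
 *	0x29 -> sector address		(Program Buffer to Flash)
 *	then poll the last written address until ready
 */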
static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs) & (map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum << cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum << cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}
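/*
 * Worked example (illustrative) for the chunking in
 * cfi_amdstd_write_buffers() above.  For a chip reporting
 * MaxBufWriteSize = 5 on a non-interleaved map:
 *
 *	wbufsize = 1 << 5 = 32 bytes
 *
 * A 100-byte write at ofs = 0x10 is split at buffer boundaries into
 * chunks of 16, 32, 32 and 20 bytes; even the 20-byte tail still goes
 * through do_write_buffer() since it is at least two words long.
 */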
/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	      __func__, chip->start);

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase.  Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			break;
		}

		/* Latency issues.  Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	      __func__, adr);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x30), adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase.  Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			break;
		}

		/* Latency issues.  Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
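/*
 * Command sequence summary (illustrative) for the two erase paths
 * above:
 *
 *	0xAA -> addr_unlock1, 0x55 -> addr_unlock2, 0x80 -> addr_unlock1
 *	0xAA -> addr_unlock1, 0x55 -> addr_unlock2, then either
 *	0x30 -> sector address	(sector erase, do_erase_oneblock) or
 *	0x10 -> addr_unlock1	(chip erase, do_erase_chip)
 */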
static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}


static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
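/*
 * Illustrative sketch (not built): how a caller drives the erase entry
 * points above.  The MTD erase API is asynchronous: the caller fills in
 * a struct erase_info with a completion callback, which
 * mtd_erase_callback() invokes once the erase has finished or failed.
 * Names and offsets below are hypothetical.
 */
#if 0
static void example_erase_done(struct erase_info *instr)
{
	/* instr->state is MTD_ERASE_DONE or MTD_ERASE_FAILED here */
}

static int example_erase_first_block(struct mtd_info *mtd)
{
	struct erase_info instr = {
		.mtd      = mtd,
		.addr     = 0,			/* must be block aligned */
		.len      = mtd->erasesize,	/* one erase block */
		.callback = example_erase_done,
	};

	return mtd->erase(mtd, &instr);
}
#endif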
static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	spin_unlock(chip->mutex);
	return ret;
}

static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}


static void cfi_amdstd_sync(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i = 0; !ret && i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			spin_unlock(chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			spin_unlock(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}


static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i = 0; !ret && i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}


static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		} else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		spin_unlock(chip->mutex);
	}
}

static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");