/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define SST49LF004B		0x0060
#define SST49LF040B		0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};


/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ?
"Not required" : "Required"); 100 101 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend)) 102 printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]); 103 else 104 printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend); 105 106 if (extp->BlkProt == 0) 107 printk(" Block protection: Not supported\n"); 108 else 109 printk(" Block protection: %d sectors per group\n", extp->BlkProt); 110 111 112 printk(" Temporary block unprotect: %s\n", 113 extp->TmpBlkUnprotect ? "Supported" : "Not supported"); 114 printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot); 115 printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps); 116 printk(" Burst mode: %s\n", 117 extp->BurstMode ? "Supported" : "Not supported"); 118 if (extp->PageMode == 0) 119 printk(" Page mode: Not supported\n"); 120 else 121 printk(" Page mode: %d word page\n", extp->PageMode << 2); 122 123 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n", 124 extp->VppMin >> 4, extp->VppMin & 0xf); 125 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n", 126 extp->VppMax >> 4, extp->VppMax & 0xf); 127 128 if (extp->TopBottom < ARRAY_SIZE(top_bottom)) 129 printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]); 130 else 131 printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom); 132 } 133 #endif 134 135 #ifdef AMD_BOOTLOC_BUG 136 /* Wheee. Bring me the head of someone at AMD. */ 137 static void fixup_amd_bootblock(struct mtd_info *mtd) 138 { 139 struct map_info *map = mtd->priv; 140 struct cfi_private *cfi = map->fldrv_priv; 141 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 142 __u8 major = extp->MajorVersion; 143 __u8 minor = extp->MinorVersion; 144 145 if (((major << 8) | minor) < 0x3131) { 146 /* CFI version 1.0 => don't trust bootloc */ 147 148 pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n", 149 map->name, cfi->mfr, cfi->id); 150 151 /* AFAICS all 29LV400 with a bottom boot block have a device ID 152 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode. 153 * These were badly detected as they have the 0x80 bit set 154 * so treat them as a special case. 155 */ 156 if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) && 157 158 /* Macronix added CFI to their 2nd generation 159 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD, 160 * Fujitsu, Spansion, EON, ESI and older Macronix) 161 * has CFI. 162 * 163 * Therefore also check the manufacturer. 164 * This reduces the risk of false detection due to 165 * the 8-bit device ID. 166 */ 167 (cfi->mfr == CFI_MFR_MACRONIX)) { 168 pr_debug("%s: Macronix MX29LV400C with bottom boot block" 169 " detected\n", map->name); 170 extp->TopBottom = 2; /* bottom boot */ 171 } else 172 if (cfi->id & 0x80) { 173 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id); 174 extp->TopBottom = 3; /* top boot */ 175 } else { 176 extp->TopBottom = 2; /* bottom boot */ 177 } 178 179 pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;" 180 " deduced %s from Device ID\n", map->name, major, minor, 181 extp->TopBottom == 2 ? 
"bottom" : "top"); 182 } 183 } 184 #endif 185 186 static void fixup_use_write_buffers(struct mtd_info *mtd) 187 { 188 struct map_info *map = mtd->priv; 189 struct cfi_private *cfi = map->fldrv_priv; 190 if (cfi->cfiq->BufWriteTimeoutTyp) { 191 pr_debug("Using buffer write method\n" ); 192 mtd->write = cfi_amdstd_write_buffers; 193 } 194 } 195 196 /* Atmel chips don't use the same PRI format as AMD chips */ 197 static void fixup_convert_atmel_pri(struct mtd_info *mtd) 198 { 199 struct map_info *map = mtd->priv; 200 struct cfi_private *cfi = map->fldrv_priv; 201 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 202 struct cfi_pri_atmel atmel_pri; 203 204 memcpy(&atmel_pri, extp, sizeof(atmel_pri)); 205 memset((char *)extp + 5, 0, sizeof(*extp) - 5); 206 207 if (atmel_pri.Features & 0x02) 208 extp->EraseSuspend = 2; 209 210 /* Some chips got it backwards... */ 211 if (cfi->id == AT49BV6416) { 212 if (atmel_pri.BottomBoot) 213 extp->TopBottom = 3; 214 else 215 extp->TopBottom = 2; 216 } else { 217 if (atmel_pri.BottomBoot) 218 extp->TopBottom = 2; 219 else 220 extp->TopBottom = 3; 221 } 222 223 /* burst write mode not supported */ 224 cfi->cfiq->BufWriteTimeoutTyp = 0; 225 cfi->cfiq->BufWriteTimeoutMax = 0; 226 } 227 228 static void fixup_use_secsi(struct mtd_info *mtd) 229 { 230 /* Setup for chips with a secsi area */ 231 mtd->read_user_prot_reg = cfi_amdstd_secsi_read; 232 mtd->read_fact_prot_reg = cfi_amdstd_secsi_read; 233 } 234 235 static void fixup_use_erase_chip(struct mtd_info *mtd) 236 { 237 struct map_info *map = mtd->priv; 238 struct cfi_private *cfi = map->fldrv_priv; 239 if ((cfi->cfiq->NumEraseRegions == 1) && 240 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) { 241 mtd->erase = cfi_amdstd_erase_chip; 242 } 243 244 } 245 246 /* 247 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors 248 * locked by default. 249 */ 250 static void fixup_use_atmel_lock(struct mtd_info *mtd) 251 { 252 mtd->lock = cfi_atmel_lock; 253 mtd->unlock = cfi_atmel_unlock; 254 mtd->flags |= MTD_POWERUP_LOCK; 255 } 256 257 static void fixup_old_sst_eraseregion(struct mtd_info *mtd) 258 { 259 struct map_info *map = mtd->priv; 260 struct cfi_private *cfi = map->fldrv_priv; 261 262 /* 263 * These flashes report two separate eraseblock regions based on the 264 * sector_erase-size and block_erase-size, although they both operate on the 265 * same memory. This is not allowed according to CFI, so we just pick the 266 * sector_erase-size. 267 */ 268 cfi->cfiq->NumEraseRegions = 1; 269 } 270 271 static void fixup_sst39vf(struct mtd_info *mtd) 272 { 273 struct map_info *map = mtd->priv; 274 struct cfi_private *cfi = map->fldrv_priv; 275 276 fixup_old_sst_eraseregion(mtd); 277 278 cfi->addr_unlock1 = 0x5555; 279 cfi->addr_unlock2 = 0x2AAA; 280 } 281 282 static void fixup_sst39vf_rev_b(struct mtd_info *mtd) 283 { 284 struct map_info *map = mtd->priv; 285 struct cfi_private *cfi = map->fldrv_priv; 286 287 fixup_old_sst_eraseregion(mtd); 288 289 cfi->addr_unlock1 = 0x555; 290 cfi->addr_unlock2 = 0x2AA; 291 292 cfi->sector_erase_cmd = CMD(0x50); 293 } 294 295 static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd) 296 { 297 struct map_info *map = mtd->priv; 298 struct cfi_private *cfi = map->fldrv_priv; 299 300 fixup_sst39vf_rev_b(mtd); 301 302 /* 303 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where 304 * it should report a size of 8KBytes (0x0020*256). 
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
	pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
	}
}

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
	{ CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
	{ CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
	{ CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
	{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
	{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
	{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
	{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
	{ 0, 0, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It looks like the device IDs are as
	 * well.  This table picks all the cases where
	 * we know that to be true.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
	{ 0, 0, NULL }
};


static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG) {
		if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
		    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
			/*
			 * Samsung K8P2815UQB and K8D6x16UxM chips
			 * report major=0 / minor=0.
			 * K8D3x16UxC chips report major=3 / minor=3.
			 */
			printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
			       " Extended Query version to 1.%c\n",
			       extp->MinorVersion);
			extp->MajorVersion = '1';
		}
	}

	/*
	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
	 */
	if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
		extp->MajorVersion = '1';
		extp->MinorVersion = '0';
	}
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

	pr_debug("MTD %s(): write buffer size %d\n", __func__,
		 mtd->writebufsize);

	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		unsigned char bootloc;
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			/*
			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
			 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
			 *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
			 *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
			 */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions-1)-i;
					__u32 swap;

					swap = cfi->cfiq->EraseRegionInfo[i];
					cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
					cfi->cfiq->EraseRegionInfo[j] = swap;
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i < cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
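	/*
	 * Geometry decode notes: DevSize is the CFI-reported log2 of the
	 * chip size in bytes, so one interleaved chip position spans
	 * (1 << DevSize) * interleave bytes.  Each EraseRegionInfo word
	 * encodes the block count minus one in its low 16 bits and the
	 * block size in 256-byte units in its high 16 bits, which is what
	 * the ((info >> 8) & ~0xff) and (info & 0xffff) + 1 expressions
	 * below recover.
	 */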
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by bits toggling, or by bits held at the wrong value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
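 *
 * (The "toggle bit" being checked is DQ6: AMD-style parts invert DQ6 on
 * every read while an embedded program/erase algorithm is running, so two
 * back-to-back reads returning identical data is taken to mean "ready".)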
 *
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}
		/* Fall through */

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		      (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we are
				 * trying to use the erase-in-progress sector. */
				put_chip(map, chip, adr);
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

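	/*
	 * (Recap of the FL_ERASING handshake above: 0xB0 asks the chip to
	 * suspend the embedded erase, chip_ready() is polled until the
	 * toggle bits stop, and the matching resume command -- the
	 * cfi->sector_erase_cmd write issued later by put_chip() -- restarts
	 * the erase once the interrupting operation is done.)
	 */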
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode. This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP-disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and pending
 * (but still masked) hardware interrupts. Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked. Task scheduling might also happen at that
 * point. The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the
 * remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
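 *
 * (The suspend polling below waits for DQ7, the 0x80 bit checked via
 * map_word_andequal() against CMD(0x80): per AMD's data-polling
 * conventions it reads back inverted while the embedded erase runs and
 * reverts once the chip has suspended or completed the operation.)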
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once). Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back. However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state. If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, cfi->sector_erase_cmd, adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit. For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why no special care is
 * taken for the add_wait_queue() or schedule() calls within the few
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling always happen within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. while the
 * flash is in array mode, so many of the cases therein are never executed
 * and cause no problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
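	/*
	 * (Masking with ~(map_bankwidth(map)-1) rounds the address down to
	 * the bus width: with a 2-byte bankwidth, for example, a read at
	 * offset 0x1003 issues its commands at 0x1002.)
	 */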
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}


static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	/* Enter the SecSi (security silicon) sector */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	/* Exit the SecSi sector and return to the array */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	/* 8 secsi bytes per chip */
	chipnum = from >> 3;
	ofs = from & 7;

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}


static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundred usecs). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip doesn't have a field for
	 * the maximum timeout, only for the typical one, which can be far
	 * too short depending on the conditions.  The ' + 1' is to avoid
	 * having a timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = (HZ / 1000) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
		 __func__, adr, datum.x[0]);

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		pr_debug("MTD %s(): NOP\n", __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
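		/*
		 * (0xF0 is the AMD-style read/reset command: it aborts the
		 * command state machine and returns the chip to read-array
		 * mode, after which the unlock/program cycle above is
		 * retried up to MAX_WORD_RETRIES times before giving up.)
		 */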
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}


static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while (len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout */
	unsigned long uWriteTimeout = (HZ / 1000) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
		 __func__, adr, datum.x[0]);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while (z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures. */
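	/*
	 * (The buffered-program sequence above is: unlock, 0x25 "Write to
	 * Buffer" at the sector, word count minus one, the data words, then
	 * 0x29 "Program Buffer to Flash" to start the operation.  On a
	 * timeout only the plain 0xF0 reset below is issued; note that some
	 * parts document a dedicated write-buffer-abort reset sequence,
	 * which this driver does not use.)
	 */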
	map_write(map, CMD(0xF0), chip->start);
	xip_enable(map, chip, adr);
	/* FIXME - should have reset delay before continuing */

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__);

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}


static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}


/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
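/*
 * (Chip erase is the six-cycle embedded command: AA @ 0x555, 55 @ 0x2AA,
 * 80 @ 0x555, AA @ 0x555, 55 @ 0x2AA, then 10 @ 0x555, matching the
 * cfi_send_gen_cmd() calls below; the unlock addresses come from
 * cfi->addr_unlock1/2 so chip-specific fixups are honoured.)
 */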
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
		 __func__, chip->start);

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}


static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
		 __func__, adr);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, cfi->sector_erase_cmd, adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}


static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}


static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	pr_debug("MTD %s(): UNLOCK 0x%08lx len %d\n", __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}


static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* Fall through */
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}


static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* Fall through */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}


static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		mutex_unlock(&chip->mutex);
	}
}

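/*
 * (The reset path below runs both from the reboot notifier and from
 * cfi_amdstd_destroy(): get_chip(..., FL_SHUTDOWN) waits out any pending
 * operation, the 0xF0 write puts the chip back into read-array mode, and
 * the FL_SHUTDOWN state makes get_chip() refuse any further access.)
 */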
/*
 * Ensure that the flash device is put back into read array mode before
 * unloading the driver or rebooting.  On some systems, rebooting while
 * the flash is in query/program/erase mode will prevent the CPU from
 * fetching the bootloader code, requiring a hard reset or power cycle.
 */
static int cfi_amdstd_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xF0), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}

		mutex_unlock(&chip->mutex);
	}

	return 0;
}


static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
			     void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_amdstd_reset(mtd);
	return NOTIFY_DONE;
}


static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_amdstd_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
MODULE_ALIAS("cfi_cmdset_0006");
MODULE_ALIAS("cfi_cmdset_0701");