/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define SST49LF004B 0x0060
#define SST49LF040B 0x0050
#define SST49LF008A 0x005a
#define AT49BV6416 0x00d6

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};


/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk(" Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ?
"Not required" : "Required"); 100 101 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend)) 102 printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]); 103 else 104 printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend); 105 106 if (extp->BlkProt == 0) 107 printk(" Block protection: Not supported\n"); 108 else 109 printk(" Block protection: %d sectors per group\n", extp->BlkProt); 110 111 112 printk(" Temporary block unprotect: %s\n", 113 extp->TmpBlkUnprotect ? "Supported" : "Not supported"); 114 printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot); 115 printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps); 116 printk(" Burst mode: %s\n", 117 extp->BurstMode ? "Supported" : "Not supported"); 118 if (extp->PageMode == 0) 119 printk(" Page mode: Not supported\n"); 120 else 121 printk(" Page mode: %d word page\n", extp->PageMode << 2); 122 123 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n", 124 extp->VppMin >> 4, extp->VppMin & 0xf); 125 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n", 126 extp->VppMax >> 4, extp->VppMax & 0xf); 127 128 if (extp->TopBottom < ARRAY_SIZE(top_bottom)) 129 printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]); 130 else 131 printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom); 132 } 133 #endif 134 135 #ifdef AMD_BOOTLOC_BUG 136 /* Wheee. Bring me the head of someone at AMD. */ 137 static void fixup_amd_bootblock(struct mtd_info *mtd, void* param) 138 { 139 struct map_info *map = mtd->priv; 140 struct cfi_private *cfi = map->fldrv_priv; 141 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 142 __u8 major = extp->MajorVersion; 143 __u8 minor = extp->MinorVersion; 144 145 if (((major << 8) | minor) < 0x3131) { 146 /* CFI version 1.0 => don't trust bootloc */ 147 148 DEBUG(MTD_DEBUG_LEVEL1, 149 "%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n", 150 map->name, cfi->mfr, cfi->id); 151 152 /* AFAICS all 29LV400 with a bottom boot block have a device ID 153 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode. 154 * These were badly detected as they have the 0x80 bit set 155 * so treat them as a special case. 156 */ 157 if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) && 158 159 /* Macronix added CFI to their 2nd generation 160 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD, 161 * Fujitsu, Spansion, EON, ESI and older Macronix) 162 * has CFI. 163 * 164 * Therefore also check the manufacturer. 165 * This reduces the risk of false detection due to 166 * the 8-bit device ID. 167 */ 168 (cfi->mfr == CFI_MFR_MACRONIX)) { 169 DEBUG(MTD_DEBUG_LEVEL1, 170 "%s: Macronix MX29LV400C with bottom boot block" 171 " detected\n", map->name); 172 extp->TopBottom = 2; /* bottom boot */ 173 } else 174 if (cfi->id & 0x80) { 175 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id); 176 extp->TopBottom = 3; /* top boot */ 177 } else { 178 extp->TopBottom = 2; /* bottom boot */ 179 } 180 181 DEBUG(MTD_DEBUG_LEVEL1, 182 "%s: AMD CFI PRI V%c.%c has no boot block field;" 183 " deduced %s from Device ID\n", map->name, major, minor, 184 extp->TopBottom == 2 ? 
"bottom" : "top"); 185 } 186 } 187 #endif 188 189 static void fixup_use_write_buffers(struct mtd_info *mtd, void *param) 190 { 191 struct map_info *map = mtd->priv; 192 struct cfi_private *cfi = map->fldrv_priv; 193 if (cfi->cfiq->BufWriteTimeoutTyp) { 194 DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" ); 195 mtd->write = cfi_amdstd_write_buffers; 196 } 197 } 198 199 /* Atmel chips don't use the same PRI format as AMD chips */ 200 static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param) 201 { 202 struct map_info *map = mtd->priv; 203 struct cfi_private *cfi = map->fldrv_priv; 204 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 205 struct cfi_pri_atmel atmel_pri; 206 207 memcpy(&atmel_pri, extp, sizeof(atmel_pri)); 208 memset((char *)extp + 5, 0, sizeof(*extp) - 5); 209 210 if (atmel_pri.Features & 0x02) 211 extp->EraseSuspend = 2; 212 213 /* Some chips got it backwards... */ 214 if (cfi->id == AT49BV6416) { 215 if (atmel_pri.BottomBoot) 216 extp->TopBottom = 3; 217 else 218 extp->TopBottom = 2; 219 } else { 220 if (atmel_pri.BottomBoot) 221 extp->TopBottom = 2; 222 else 223 extp->TopBottom = 3; 224 } 225 226 /* burst write mode not supported */ 227 cfi->cfiq->BufWriteTimeoutTyp = 0; 228 cfi->cfiq->BufWriteTimeoutMax = 0; 229 } 230 231 static void fixup_use_secsi(struct mtd_info *mtd, void *param) 232 { 233 /* Setup for chips with a secsi area */ 234 mtd->read_user_prot_reg = cfi_amdstd_secsi_read; 235 mtd->read_fact_prot_reg = cfi_amdstd_secsi_read; 236 } 237 238 static void fixup_use_erase_chip(struct mtd_info *mtd, void *param) 239 { 240 struct map_info *map = mtd->priv; 241 struct cfi_private *cfi = map->fldrv_priv; 242 if ((cfi->cfiq->NumEraseRegions == 1) && 243 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) { 244 mtd->erase = cfi_amdstd_erase_chip; 245 } 246 247 } 248 249 /* 250 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors 251 * locked by default. 252 */ 253 static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param) 254 { 255 mtd->lock = cfi_atmel_lock; 256 mtd->unlock = cfi_atmel_unlock; 257 mtd->flags |= MTD_POWERUP_LOCK; 258 } 259 260 static void fixup_old_sst_eraseregion(struct mtd_info *mtd) 261 { 262 struct map_info *map = mtd->priv; 263 struct cfi_private *cfi = map->fldrv_priv; 264 265 /* 266 * These flashes report two seperate eraseblock regions based on the 267 * sector_erase-size and block_erase-size, although they both operate on the 268 * same memory. This is not allowed according to CFI, so we just pick the 269 * sector_erase-size. 270 */ 271 cfi->cfiq->NumEraseRegions = 1; 272 } 273 274 static void fixup_sst39vf(struct mtd_info *mtd, void *param) 275 { 276 struct map_info *map = mtd->priv; 277 struct cfi_private *cfi = map->fldrv_priv; 278 279 fixup_old_sst_eraseregion(mtd); 280 281 cfi->addr_unlock1 = 0x5555; 282 cfi->addr_unlock2 = 0x2AAA; 283 } 284 285 static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param) 286 { 287 struct map_info *map = mtd->priv; 288 struct cfi_private *cfi = map->fldrv_priv; 289 290 fixup_old_sst_eraseregion(mtd); 291 292 cfi->addr_unlock1 = 0x555; 293 cfi->addr_unlock2 = 0x2AA; 294 295 cfi->sector_erase_cmd = CMD(0x50); 296 } 297 298 static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd, void *param) 299 { 300 struct map_info *map = mtd->priv; 301 struct cfi_private *cfi = map->fldrv_priv; 302 303 fixup_sst39vf_rev_b(mtd, param); 304 305 /* 306 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where 307 * it should report a size of 8KBytes (0x0020*256). 
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
	pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
	}
}

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, /* SST39VF1602 */
	{ CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, /* SST39VF1601 */
	{ CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, /* SST39VF3202 */
	{ CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, /* SST39VF3201 */
	{ CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, /* SST39VF3202B */
	{ CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, /* SST39VF3201B */
	{ CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, /* SST39VF6402B */
	{ CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, /* SST39VF6401B */
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
	{ CFI_MFR_SST, 0x536A, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6402 */
	{ CFI_MFR_SST, 0x536B, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6401 */
	{ CFI_MFR_SST, 0x536C, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6404 */
	{ CFI_MFR_SST, 0x536D, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common. It looks like the device IDs are as
	 * well. This table picks all the cases where we know
	 * that is true.
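	 * (These fixups are applied in both CFI and JEDEC probe modes.)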
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
	{ 0, 0, NULL, NULL }
};


static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e &&
	    extp->MajorVersion == '0')
		extp->MajorVersion = '1';
	/*
	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
	 */
	if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
		extp->MajorVersion = '1';
		extp->MinorVersion = '0';
	}
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase = cfi_amdstd_erase_varsize;
	mtd->write = cfi_amdstd_write_words;
	mtd->read = cfi_amdstd_read;
	mtd->sync = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume = cfi_amdstd_resume;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->name = map->name;
	mtd->writesize = 1;

	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			/*
			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4
			 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
			 *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
			 */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '4'))) {
				printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). "
				       "Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions-1)-i;
					__u32 swap;

					swap = cfi->cfiq->EraseRegionInfo[i];
					cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
					cfi->cfiq->EraseRegionInfo[j] = swap;
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
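	/* Also free what cfi_read_pri() and the probe code allocated for us. */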
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by bits toggling or by bits held at the wrong value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 *
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		      (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
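				 * (A full second has elapsed here, far
				 * beyond any sane suspend latency, so
				 * treat this as a hard failure.)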
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we're trying
				 * to use the erase-in-progress sector. */
				map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode. This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
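 *
 * (__xipram places a function's code in RAM so that it stays executable
 * while the flash array itself is busy and unreadable.)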
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and pending
 * (but still masked) hardware interrupts. Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked. Task scheduling might also happen at that
 * point. The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the
 * remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once). Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back. However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state. If so let's wait
			 * until it's done.
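			 * (They will wake us through chip->wq when they
			 * release the chip.)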
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, cfi->sector_erase_cmd, adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway. We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit. For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with. This is why no care is taken for
 * the presence of add_wait_queue() or schedule() calls within a couple of
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling always happen within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. while the
 * flash is in array mode, so many of the cases therein are never executed
 * and XIP is never disturbed.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned.
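	 * (Commands must be issued at a full bus-width boundary, hence
	 * masking off the low address bits below.)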
	 */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);


	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}


static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY){
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;


	/* ofs: offset within the first chip that the first read should start */

	/* 8 secsi bytes per chip */
	chipnum=from>>3;
	ofs=from & 7;


	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}


static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundred usec). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead. Unfortunately, struct flchip doesn't have a field for
	 * the maximum timeout, only for the typical one, which can be far
	 * too short depending on the conditions. The ' + 1' is to avoid
	 * having a timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
		       __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures.
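		 * (0xF0 is the AMD-standard reset command: it returns the
		 * chip to array/read mode from any state.)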
		 */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}


static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures.
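	 * (Note: some chips want the full Write-Buffer-Abort Reset sequence
	 * (AA/55/F0) after a failed buffer program; only a plain 0xF0 reset
	 * is issued here.)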
	 */
	map_write( map, CMD(0xF0), chip->start );
	xip_enable(map, chip, adr);
	/* FIXME - should have reset delay before continuing */

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__ );

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}


static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}


/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
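 *
 * (Chip erase is the six-cycle AMD sequence AA/55/80/AA/55/10; sector
 * erase uses the same prefix but ends with the sector-erase command
 * written to the sector address instead.)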
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, chip->start );

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures.
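		 * (chip_good() failing here means the array never settled
		 * to all ones, i.e. the erase did not complete.)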
		 */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}


static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, adr );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, cfi->sector_erase_cmd, adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures.
		 */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}


static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}


static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}


static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}


static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}


static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		mutex_unlock(&chip->mutex);
	}
}


/*
 * Ensure that the flash device is put back into read array mode before
 * unloading the driver or rebooting. On some systems, rebooting while
 * the flash is in query/program/erase mode will prevent the CPU from
 * fetching the bootloader code, requiring a hard reset or power cycle.
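 *
 * (cfi_amdstd_reset() below is invoked both by the reboot notifier that
 * cfi_cmdset_0002() registers and by cfi_amdstd_destroy() on unload.)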
 */
static int cfi_amdstd_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xF0), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}

		mutex_unlock(&chip->mutex);
	}

	return 0;
}


static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
			     void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_amdstd_reset(mtd);
	return NOTIFY_DONE;
}


static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_amdstd_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
MODULE_ALIAS("cfi_cmdset_0006");
MODULE_ALIAS("cfi_cmdset_0701");