1 /* 2 * Common Flash Interface support: 3 * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002) 4 * 5 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp> 6 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com> 7 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com> 8 * 9 * 2_by_8 routines added by Simon Munton 10 * 11 * 4_by_16 work by Carolyn J. Smith 12 * 13 * XIP support hooks by Vitaly Wool (based on code for Intel flash 14 * by Nicolas Pitre) 15 * 16 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0 17 * 18 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com 19 * 20 * This code is GPL 21 */ 22 23 #include <linux/module.h> 24 #include <linux/types.h> 25 #include <linux/kernel.h> 26 #include <linux/sched.h> 27 #include <linux/init.h> 28 #include <asm/io.h> 29 #include <asm/byteorder.h> 30 31 #include <linux/errno.h> 32 #include <linux/slab.h> 33 #include <linux/delay.h> 34 #include <linux/interrupt.h> 35 #include <linux/reboot.h> 36 #include <linux/mtd/map.h> 37 #include <linux/mtd/mtd.h> 38 #include <linux/mtd/cfi.h> 39 #include <linux/mtd/xip.h> 40 41 #define AMD_BOOTLOC_BUG 42 #define FORCE_WORD_WRITE 0 43 44 #define MAX_WORD_RETRIES 3 45 46 #define SST49LF004B 0x0060 47 #define SST49LF040B 0x0050 48 #define SST49LF008A 0x005a 49 #define AT49BV6416 0x00d6 50 51 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 52 static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 53 static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 54 static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *); 55 static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *); 56 static void cfi_amdstd_sync (struct mtd_info *); 57 static int cfi_amdstd_suspend (struct mtd_info *); 58 static void cfi_amdstd_resume (struct mtd_info *); 59 static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *); 60 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 61 62 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, 63 size_t *retlen, const u_char *buf); 64 65 static void cfi_amdstd_destroy(struct mtd_info *); 66 67 struct mtd_info *cfi_cmdset_0002(struct map_info *, int); 68 static struct mtd_info *cfi_amdstd_setup (struct mtd_info *); 69 70 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode); 71 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr); 72 #include "fwh_lock.h" 73 74 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 75 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 76 77 static struct mtd_chip_driver cfi_amdstd_chipdrv = { 78 .probe = NULL, /* Not usable directly */ 79 .destroy = cfi_amdstd_destroy, 80 .name = "cfi_cmdset_0002", 81 .module = THIS_MODULE 82 }; 83 84 85 /* #define DEBUG_CFI_FEATURES */ 86 87 88 #ifdef DEBUG_CFI_FEATURES 89 static void cfi_tell_features(struct cfi_pri_amdstd *extp) 90 { 91 const char* erase_suspend[3] = { 92 "Not supported", "Read only", "Read/write" 93 }; 94 const char* top_bottom[6] = { 95 "No WP", "8x8KiB sectors at top & bottom, no WP", 96 "Bottom boot", "Top boot", 97 "Uniform, Bottom WP", "Uniform, Top WP" 98 }; 99 100 printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1); 101 printk(" Address sensitive unlock: %s\n", 102 (extp->SiliconRevision & 1) ? "Not required" : "Required"); 103 104 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend)) 105 printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]); 106 else 107 printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend); 108 109 if (extp->BlkProt == 0) 110 printk(" Block protection: Not supported\n"); 111 else 112 printk(" Block protection: %d sectors per group\n", extp->BlkProt); 113 114 115 printk(" Temporary block unprotect: %s\n", 116 extp->TmpBlkUnprotect ? "Supported" : "Not supported"); 117 printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot); 118 printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps); 119 printk(" Burst mode: %s\n", 120 extp->BurstMode ? "Supported" : "Not supported"); 121 if (extp->PageMode == 0) 122 printk(" Page mode: Not supported\n"); 123 else 124 printk(" Page mode: %d word page\n", extp->PageMode << 2); 125 126 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n", 127 extp->VppMin >> 4, extp->VppMin & 0xf); 128 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n", 129 extp->VppMax >> 4, extp->VppMax & 0xf); 130 131 if (extp->TopBottom < ARRAY_SIZE(top_bottom)) 132 printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]); 133 else 134 printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom); 135 } 136 #endif 137 138 #ifdef AMD_BOOTLOC_BUG 139 /* Wheee. Bring me the head of someone at AMD. */ 140 static void fixup_amd_bootblock(struct mtd_info *mtd) 141 { 142 struct map_info *map = mtd->priv; 143 struct cfi_private *cfi = map->fldrv_priv; 144 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 145 __u8 major = extp->MajorVersion; 146 __u8 minor = extp->MinorVersion; 147 148 if (((major << 8) | minor) < 0x3131) { 149 /* CFI version 1.0 => don't trust bootloc */ 150 151 pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n", 152 map->name, cfi->mfr, cfi->id); 153 154 /* AFAICS all 29LV400 with a bottom boot block have a device ID 155 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode. 156 * These were badly detected as they have the 0x80 bit set 157 * so treat them as a special case. 158 */ 159 if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) && 160 161 /* Macronix added CFI to their 2nd generation 162 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD, 163 * Fujitsu, Spansion, EON, ESI and older Macronix) 164 * has CFI. 165 * 166 * Therefore also check the manufacturer. 167 * This reduces the risk of false detection due to 168 * the 8-bit device ID. 169 */ 170 (cfi->mfr == CFI_MFR_MACRONIX)) { 171 pr_debug("%s: Macronix MX29LV400C with bottom boot block" 172 " detected\n", map->name); 173 extp->TopBottom = 2; /* bottom boot */ 174 } else 175 if (cfi->id & 0x80) { 176 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id); 177 extp->TopBottom = 3; /* top boot */ 178 } else { 179 extp->TopBottom = 2; /* bottom boot */ 180 } 181 182 pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;" 183 " deduced %s from Device ID\n", map->name, major, minor, 184 extp->TopBottom == 2 ? "bottom" : "top"); 185 } 186 } 187 #endif 188 189 static void fixup_use_write_buffers(struct mtd_info *mtd) 190 { 191 struct map_info *map = mtd->priv; 192 struct cfi_private *cfi = map->fldrv_priv; 193 if (cfi->cfiq->BufWriteTimeoutTyp) { 194 pr_debug("Using buffer write method\n" ); 195 mtd->_write = cfi_amdstd_write_buffers; 196 } 197 } 198 199 /* Atmel chips don't use the same PRI format as AMD chips */ 200 static void fixup_convert_atmel_pri(struct mtd_info *mtd) 201 { 202 struct map_info *map = mtd->priv; 203 struct cfi_private *cfi = map->fldrv_priv; 204 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 205 struct cfi_pri_atmel atmel_pri; 206 207 memcpy(&atmel_pri, extp, sizeof(atmel_pri)); 208 memset((char *)extp + 5, 0, sizeof(*extp) - 5); 209 210 if (atmel_pri.Features & 0x02) 211 extp->EraseSuspend = 2; 212 213 /* Some chips got it backwards... */ 214 if (cfi->id == AT49BV6416) { 215 if (atmel_pri.BottomBoot) 216 extp->TopBottom = 3; 217 else 218 extp->TopBottom = 2; 219 } else { 220 if (atmel_pri.BottomBoot) 221 extp->TopBottom = 2; 222 else 223 extp->TopBottom = 3; 224 } 225 226 /* burst write mode not supported */ 227 cfi->cfiq->BufWriteTimeoutTyp = 0; 228 cfi->cfiq->BufWriteTimeoutMax = 0; 229 } 230 231 static void fixup_use_secsi(struct mtd_info *mtd) 232 { 233 /* Setup for chips with a secsi area */ 234 mtd->_read_user_prot_reg = cfi_amdstd_secsi_read; 235 mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read; 236 } 237 238 static void fixup_use_erase_chip(struct mtd_info *mtd) 239 { 240 struct map_info *map = mtd->priv; 241 struct cfi_private *cfi = map->fldrv_priv; 242 if ((cfi->cfiq->NumEraseRegions == 1) && 243 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) { 244 mtd->_erase = cfi_amdstd_erase_chip; 245 } 246 247 } 248 249 /* 250 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors 251 * locked by default. 252 */ 253 static void fixup_use_atmel_lock(struct mtd_info *mtd) 254 { 255 mtd->_lock = cfi_atmel_lock; 256 mtd->_unlock = cfi_atmel_unlock; 257 mtd->flags |= MTD_POWERUP_LOCK; 258 } 259 260 static void fixup_old_sst_eraseregion(struct mtd_info *mtd) 261 { 262 struct map_info *map = mtd->priv; 263 struct cfi_private *cfi = map->fldrv_priv; 264 265 /* 266 * These flashes report two separate eraseblock regions based on the 267 * sector_erase-size and block_erase-size, although they both operate on the 268 * same memory. This is not allowed according to CFI, so we just pick the 269 * sector_erase-size. 270 */ 271 cfi->cfiq->NumEraseRegions = 1; 272 } 273 274 static void fixup_sst39vf(struct mtd_info *mtd) 275 { 276 struct map_info *map = mtd->priv; 277 struct cfi_private *cfi = map->fldrv_priv; 278 279 fixup_old_sst_eraseregion(mtd); 280 281 cfi->addr_unlock1 = 0x5555; 282 cfi->addr_unlock2 = 0x2AAA; 283 } 284 285 static void fixup_sst39vf_rev_b(struct mtd_info *mtd) 286 { 287 struct map_info *map = mtd->priv; 288 struct cfi_private *cfi = map->fldrv_priv; 289 290 fixup_old_sst_eraseregion(mtd); 291 292 cfi->addr_unlock1 = 0x555; 293 cfi->addr_unlock2 = 0x2AA; 294 295 cfi->sector_erase_cmd = CMD(0x50); 296 } 297 298 static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd) 299 { 300 struct map_info *map = mtd->priv; 301 struct cfi_private *cfi = map->fldrv_priv; 302 303 fixup_sst39vf_rev_b(mtd); 304 305 /* 306 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where 307 * it should report a size of 8KBytes (0x0020*256). 308 */ 309 cfi->cfiq->EraseRegionInfo[0] = 0x002003ff; 310 pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name); 311 } 312 313 static void fixup_s29gl064n_sectors(struct mtd_info *mtd) 314 { 315 struct map_info *map = mtd->priv; 316 struct cfi_private *cfi = map->fldrv_priv; 317 318 if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) { 319 cfi->cfiq->EraseRegionInfo[0] |= 0x0040; 320 pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name); 321 } 322 } 323 324 static void fixup_s29gl032n_sectors(struct mtd_info *mtd) 325 { 326 struct map_info *map = mtd->priv; 327 struct cfi_private *cfi = map->fldrv_priv; 328 329 if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) { 330 cfi->cfiq->EraseRegionInfo[1] &= ~0x0040; 331 pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name); 332 } 333 } 334 335 /* Used to fix CFI-Tables of chips without Extended Query Tables */ 336 static struct cfi_fixup cfi_nopri_fixup_table[] = { 337 { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */ 338 { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */ 339 { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */ 340 { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */ 341 { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */ 342 { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */ 343 { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */ 344 { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */ 345 { 0, 0, NULL } 346 }; 347 348 static struct cfi_fixup cfi_fixup_table[] = { 349 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri }, 350 #ifdef AMD_BOOTLOC_BUG 351 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock }, 352 { CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock }, 353 { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock }, 354 #endif 355 { CFI_MFR_AMD, 0x0050, fixup_use_secsi }, 356 { CFI_MFR_AMD, 0x0053, fixup_use_secsi }, 357 { CFI_MFR_AMD, 0x0055, fixup_use_secsi }, 358 { CFI_MFR_AMD, 0x0056, fixup_use_secsi }, 359 { CFI_MFR_AMD, 0x005C, fixup_use_secsi }, 360 { CFI_MFR_AMD, 0x005F, fixup_use_secsi }, 361 { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors }, 362 { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors }, 363 { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors }, 364 { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors }, 365 { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */ 366 { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */ 367 { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */ 368 { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */ 369 #if !FORCE_WORD_WRITE 370 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers }, 371 #endif 372 { 0, 0, NULL } 373 }; 374 static struct cfi_fixup jedec_fixup_table[] = { 375 { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock }, 376 { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock }, 377 { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock }, 378 { 0, 0, NULL } 379 }; 380 381 static struct cfi_fixup fixup_table[] = { 382 /* The CFI vendor ids and the JEDEC vendor IDs appear 383 * to be common. It is like the devices id's are as 384 * well. This table is to pick all cases where 385 * we know that is the case. 386 */ 387 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip }, 388 { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock }, 389 { 0, 0, NULL } 390 }; 391 392 393 static void cfi_fixup_major_minor(struct cfi_private *cfi, 394 struct cfi_pri_amdstd *extp) 395 { 396 if (cfi->mfr == CFI_MFR_SAMSUNG) { 397 if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') || 398 (extp->MajorVersion == '3' && extp->MinorVersion == '3')) { 399 /* 400 * Samsung K8P2815UQB and K8D6x16UxM chips 401 * report major=0 / minor=0. 402 * K8D3x16UxC chips report major=3 / minor=3. 403 */ 404 printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu" 405 " Extended Query version to 1.%c\n", 406 extp->MinorVersion); 407 extp->MajorVersion = '1'; 408 } 409 } 410 411 /* 412 * SST 38VF640x chips report major=0xFF / minor=0xFF. 413 */ 414 if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) { 415 extp->MajorVersion = '1'; 416 extp->MinorVersion = '0'; 417 } 418 } 419 420 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) 421 { 422 struct cfi_private *cfi = map->fldrv_priv; 423 struct mtd_info *mtd; 424 int i; 425 426 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL); 427 if (!mtd) { 428 printk(KERN_WARNING "Failed to allocate memory for MTD device\n"); 429 return NULL; 430 } 431 mtd->priv = map; 432 mtd->type = MTD_NORFLASH; 433 434 /* Fill in the default mtd operations */ 435 mtd->_erase = cfi_amdstd_erase_varsize; 436 mtd->_write = cfi_amdstd_write_words; 437 mtd->_read = cfi_amdstd_read; 438 mtd->_sync = cfi_amdstd_sync; 439 mtd->_suspend = cfi_amdstd_suspend; 440 mtd->_resume = cfi_amdstd_resume; 441 mtd->flags = MTD_CAP_NORFLASH; 442 mtd->name = map->name; 443 mtd->writesize = 1; 444 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 445 446 pr_debug("MTD %s(): write buffer size %d\n", __func__, 447 mtd->writebufsize); 448 449 mtd->_panic_write = cfi_amdstd_panic_write; 450 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot; 451 452 if (cfi->cfi_mode==CFI_MODE_CFI){ 453 unsigned char bootloc; 454 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR; 455 struct cfi_pri_amdstd *extp; 456 457 extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu"); 458 if (extp) { 459 /* 460 * It's a real CFI chip, not one for which the probe 461 * routine faked a CFI structure. 462 */ 463 cfi_fixup_major_minor(cfi, extp); 464 465 /* 466 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5 467 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19 468 * http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf 469 * http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf 470 * http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf 471 */ 472 if (extp->MajorVersion != '1' || 473 (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) { 474 printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query " 475 "version %c.%c (%#02x/%#02x).\n", 476 extp->MajorVersion, extp->MinorVersion, 477 extp->MajorVersion, extp->MinorVersion); 478 kfree(extp); 479 kfree(mtd); 480 return NULL; 481 } 482 483 printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n", 484 extp->MajorVersion, extp->MinorVersion); 485 486 /* Install our own private info structure */ 487 cfi->cmdset_priv = extp; 488 489 /* Apply cfi device specific fixups */ 490 cfi_fixup(mtd, cfi_fixup_table); 491 492 #ifdef DEBUG_CFI_FEATURES 493 /* Tell the user about it in lots of lovely detail */ 494 cfi_tell_features(extp); 495 #endif 496 497 bootloc = extp->TopBottom; 498 if ((bootloc < 2) || (bootloc > 5)) { 499 printk(KERN_WARNING "%s: CFI contains unrecognised boot " 500 "bank location (%d). Assuming bottom.\n", 501 map->name, bootloc); 502 bootloc = 2; 503 } 504 505 if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) { 506 printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name); 507 508 for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) { 509 int j = (cfi->cfiq->NumEraseRegions-1)-i; 510 __u32 swap; 511 512 swap = cfi->cfiq->EraseRegionInfo[i]; 513 cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j]; 514 cfi->cfiq->EraseRegionInfo[j] = swap; 515 } 516 } 517 /* Set the default CFI lock/unlock addresses */ 518 cfi->addr_unlock1 = 0x555; 519 cfi->addr_unlock2 = 0x2aa; 520 } 521 cfi_fixup(mtd, cfi_nopri_fixup_table); 522 523 if (!cfi->addr_unlock1 || !cfi->addr_unlock2) { 524 kfree(mtd); 525 return NULL; 526 } 527 528 } /* CFI mode */ 529 else if (cfi->cfi_mode == CFI_MODE_JEDEC) { 530 /* Apply jedec specific fixups */ 531 cfi_fixup(mtd, jedec_fixup_table); 532 } 533 /* Apply generic fixups */ 534 cfi_fixup(mtd, fixup_table); 535 536 for (i=0; i< cfi->numchips; i++) { 537 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp; 538 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp; 539 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp; 540 cfi->chips[i].ref_point_counter = 0; 541 init_waitqueue_head(&(cfi->chips[i].wq)); 542 } 543 544 map->fldrv = &cfi_amdstd_chipdrv; 545 546 return cfi_amdstd_setup(mtd); 547 } 548 struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002"))); 549 struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002"))); 550 EXPORT_SYMBOL_GPL(cfi_cmdset_0002); 551 EXPORT_SYMBOL_GPL(cfi_cmdset_0006); 552 EXPORT_SYMBOL_GPL(cfi_cmdset_0701); 553 554 static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) 555 { 556 struct map_info *map = mtd->priv; 557 struct cfi_private *cfi = map->fldrv_priv; 558 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave; 559 unsigned long offset = 0; 560 int i,j; 561 562 printk(KERN_NOTICE "number of %s chips: %d\n", 563 (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips); 564 /* Select the correct geometry setup */ 565 mtd->size = devsize * cfi->numchips; 566 567 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips; 568 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) 569 * mtd->numeraseregions, GFP_KERNEL); 570 if (!mtd->eraseregions) { 571 printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n"); 572 goto setup_err; 573 } 574 575 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) { 576 unsigned long ernum, ersize; 577 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave; 578 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1; 579 580 if (mtd->erasesize < ersize) { 581 mtd->erasesize = ersize; 582 } 583 for (j=0; j<cfi->numchips; j++) { 584 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset; 585 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize; 586 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum; 587 } 588 offset += (ersize * ernum); 589 } 590 if (offset != devsize) { 591 /* Argh */ 592 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize); 593 goto setup_err; 594 } 595 596 __module_get(THIS_MODULE); 597 register_reboot_notifier(&mtd->reboot_notifier); 598 return mtd; 599 600 setup_err: 601 kfree(mtd->eraseregions); 602 kfree(mtd); 603 kfree(cfi->cmdset_priv); 604 kfree(cfi->cfiq); 605 return NULL; 606 } 607 608 /* 609 * Return true if the chip is ready. 610 * 611 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any 612 * non-suspended sector) and is indicated by no toggle bits toggling. 613 * 614 * Note that anything more complicated than checking if no bits are toggling 615 * (including checking DQ5 for an error status) is tricky to get working 616 * correctly and is therefore not done (particularly with interleaved chips 617 * as each chip must be checked independently of the others). 618 */ 619 static int __xipram chip_ready(struct map_info *map, unsigned long addr) 620 { 621 map_word d, t; 622 623 d = map_read(map, addr); 624 t = map_read(map, addr); 625 626 return map_word_equal(map, d, t); 627 } 628 629 /* 630 * Return true if the chip is ready and has the correct value. 631 * 632 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any 633 * non-suspended sector) and it is indicated by no bits toggling. 634 * 635 * Error are indicated by toggling bits or bits held with the wrong value, 636 * or with bits toggling. 637 * 638 * Note that anything more complicated than checking if no bits are toggling 639 * (including checking DQ5 for an error status) is tricky to get working 640 * correctly and is therefore not done (particularly with interleaved chips 641 * as each chip must be checked independently of the others). 642 * 643 */ 644 static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected) 645 { 646 map_word oldd, curd; 647 648 oldd = map_read(map, addr); 649 curd = map_read(map, addr); 650 651 return map_word_equal(map, oldd, curd) && 652 map_word_equal(map, curd, expected); 653 } 654 655 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode) 656 { 657 DECLARE_WAITQUEUE(wait, current); 658 struct cfi_private *cfi = map->fldrv_priv; 659 unsigned long timeo; 660 struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv; 661 662 resettime: 663 timeo = jiffies + HZ; 664 retry: 665 switch (chip->state) { 666 667 case FL_STATUS: 668 for (;;) { 669 if (chip_ready(map, adr)) 670 break; 671 672 if (time_after(jiffies, timeo)) { 673 printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); 674 return -EIO; 675 } 676 mutex_unlock(&chip->mutex); 677 cfi_udelay(1); 678 mutex_lock(&chip->mutex); 679 /* Someone else might have been playing with it. */ 680 goto retry; 681 } 682 683 case FL_READY: 684 case FL_CFI_QUERY: 685 case FL_JEDEC_QUERY: 686 return 0; 687 688 case FL_ERASING: 689 if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) || 690 !(mode == FL_READY || mode == FL_POINT || 691 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)))) 692 goto sleep; 693 694 /* We could check to see if we're trying to access the sector 695 * that is currently being erased. However, no user will try 696 * anything like that so we just wait for the timeout. */ 697 698 /* Erase suspend */ 699 /* It's harmless to issue the Erase-Suspend and Erase-Resume 700 * commands when the erase algorithm isn't in progress. */ 701 map_write(map, CMD(0xB0), chip->in_progress_block_addr); 702 chip->oldstate = FL_ERASING; 703 chip->state = FL_ERASE_SUSPENDING; 704 chip->erase_suspended = 1; 705 for (;;) { 706 if (chip_ready(map, adr)) 707 break; 708 709 if (time_after(jiffies, timeo)) { 710 /* Should have suspended the erase by now. 711 * Send an Erase-Resume command as either 712 * there was an error (so leave the erase 713 * routine to recover from it) or we trying to 714 * use the erase-in-progress sector. */ 715 put_chip(map, chip, adr); 716 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__); 717 return -EIO; 718 } 719 720 mutex_unlock(&chip->mutex); 721 cfi_udelay(1); 722 mutex_lock(&chip->mutex); 723 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING. 724 So we can just loop here. */ 725 } 726 chip->state = FL_READY; 727 return 0; 728 729 case FL_XIP_WHILE_ERASING: 730 if (mode != FL_READY && mode != FL_POINT && 731 (!cfip || !(cfip->EraseSuspend&2))) 732 goto sleep; 733 chip->oldstate = chip->state; 734 chip->state = FL_READY; 735 return 0; 736 737 case FL_SHUTDOWN: 738 /* The machine is rebooting */ 739 return -EIO; 740 741 case FL_POINT: 742 /* Only if there's no operation suspended... */ 743 if (mode == FL_READY && chip->oldstate == FL_READY) 744 return 0; 745 746 default: 747 sleep: 748 set_current_state(TASK_UNINTERRUPTIBLE); 749 add_wait_queue(&chip->wq, &wait); 750 mutex_unlock(&chip->mutex); 751 schedule(); 752 remove_wait_queue(&chip->wq, &wait); 753 mutex_lock(&chip->mutex); 754 goto resettime; 755 } 756 } 757 758 759 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr) 760 { 761 struct cfi_private *cfi = map->fldrv_priv; 762 763 switch(chip->oldstate) { 764 case FL_ERASING: 765 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr); 766 chip->oldstate = FL_READY; 767 chip->state = FL_ERASING; 768 break; 769 770 case FL_XIP_WHILE_ERASING: 771 chip->state = chip->oldstate; 772 chip->oldstate = FL_READY; 773 break; 774 775 case FL_READY: 776 case FL_STATUS: 777 break; 778 default: 779 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate); 780 } 781 wake_up(&chip->wq); 782 } 783 784 #ifdef CONFIG_MTD_XIP 785 786 /* 787 * No interrupt what so ever can be serviced while the flash isn't in array 788 * mode. This is ensured by the xip_disable() and xip_enable() functions 789 * enclosing any code path where the flash is known not to be in array mode. 790 * And within a XIP disabled code path, only functions marked with __xipram 791 * may be called and nothing else (it's a good thing to inspect generated 792 * assembly to make sure inline functions were actually inlined and that gcc 793 * didn't emit calls to its own support functions). Also configuring MTD CFI 794 * support to a single buswidth and a single interleave is also recommended. 795 */ 796 797 static void xip_disable(struct map_info *map, struct flchip *chip, 798 unsigned long adr) 799 { 800 /* TODO: chips with no XIP use should ignore and return */ 801 (void) map_read(map, adr); /* ensure mmu mapping is up to date */ 802 local_irq_disable(); 803 } 804 805 static void __xipram xip_enable(struct map_info *map, struct flchip *chip, 806 unsigned long adr) 807 { 808 struct cfi_private *cfi = map->fldrv_priv; 809 810 if (chip->state != FL_POINT && chip->state != FL_READY) { 811 map_write(map, CMD(0xf0), adr); 812 chip->state = FL_READY; 813 } 814 (void) map_read(map, adr); 815 xip_iprefetch(); 816 local_irq_enable(); 817 } 818 819 /* 820 * When a delay is required for the flash operation to complete, the 821 * xip_udelay() function is polling for both the given timeout and pending 822 * (but still masked) hardware interrupts. Whenever there is an interrupt 823 * pending then the flash erase operation is suspended, array mode restored 824 * and interrupts unmasked. Task scheduling might also happen at that 825 * point. The CPU eventually returns from the interrupt or the call to 826 * schedule() and the suspended flash operation is resumed for the remaining 827 * of the delay period. 828 * 829 * Warning: this function _will_ fool interrupt latency tracing tools. 830 */ 831 832 static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, 833 unsigned long adr, int usec) 834 { 835 struct cfi_private *cfi = map->fldrv_priv; 836 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 837 map_word status, OK = CMD(0x80); 838 unsigned long suspended, start = xip_currtime(); 839 flstate_t oldstate; 840 841 do { 842 cpu_relax(); 843 if (xip_irqpending() && extp && 844 ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) && 845 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) { 846 /* 847 * Let's suspend the erase operation when supported. 848 * Note that we currently don't try to suspend 849 * interleaved chips if there is already another 850 * operation suspended (imagine what happens 851 * when one chip was already done with the current 852 * operation while another chip suspended it, then 853 * we resume the whole thing at once). Yes, it 854 * can happen! 855 */ 856 map_write(map, CMD(0xb0), adr); 857 usec -= xip_elapsed_since(start); 858 suspended = xip_currtime(); 859 do { 860 if (xip_elapsed_since(suspended) > 100000) { 861 /* 862 * The chip doesn't want to suspend 863 * after waiting for 100 msecs. 864 * This is a critical error but there 865 * is not much we can do here. 866 */ 867 return; 868 } 869 status = map_read(map, adr); 870 } while (!map_word_andequal(map, status, OK, OK)); 871 872 /* Suspend succeeded */ 873 oldstate = chip->state; 874 if (!map_word_bitsset(map, status, CMD(0x40))) 875 break; 876 chip->state = FL_XIP_WHILE_ERASING; 877 chip->erase_suspended = 1; 878 map_write(map, CMD(0xf0), adr); 879 (void) map_read(map, adr); 880 xip_iprefetch(); 881 local_irq_enable(); 882 mutex_unlock(&chip->mutex); 883 xip_iprefetch(); 884 cond_resched(); 885 886 /* 887 * We're back. However someone else might have 888 * decided to go write to the chip if we are in 889 * a suspended erase state. If so let's wait 890 * until it's done. 891 */ 892 mutex_lock(&chip->mutex); 893 while (chip->state != FL_XIP_WHILE_ERASING) { 894 DECLARE_WAITQUEUE(wait, current); 895 set_current_state(TASK_UNINTERRUPTIBLE); 896 add_wait_queue(&chip->wq, &wait); 897 mutex_unlock(&chip->mutex); 898 schedule(); 899 remove_wait_queue(&chip->wq, &wait); 900 mutex_lock(&chip->mutex); 901 } 902 /* Disallow XIP again */ 903 local_irq_disable(); 904 905 /* Resume the write or erase operation */ 906 map_write(map, cfi->sector_erase_cmd, adr); 907 chip->state = oldstate; 908 start = xip_currtime(); 909 } else if (usec >= 1000000/HZ) { 910 /* 911 * Try to save on CPU power when waiting delay 912 * is at least a system timer tick period. 913 * No need to be extremely accurate here. 914 */ 915 xip_cpu_idle(); 916 } 917 status = map_read(map, adr); 918 } while (!map_word_andequal(map, status, OK, OK) 919 && xip_elapsed_since(start) < usec); 920 } 921 922 #define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec) 923 924 /* 925 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while 926 * the flash is actively programming or erasing since we have to poll for 927 * the operation to complete anyway. We can't do that in a generic way with 928 * a XIP setup so do it before the actual flash operation in this case 929 * and stub it out from INVALIDATE_CACHE_UDELAY. 930 */ 931 #define XIP_INVAL_CACHED_RANGE(map, from, size) \ 932 INVALIDATE_CACHED_RANGE(map, from, size) 933 934 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ 935 UDELAY(map, chip, adr, usec) 936 937 /* 938 * Extra notes: 939 * 940 * Activating this XIP support changes the way the code works a bit. For 941 * example the code to suspend the current process when concurrent access 942 * happens is never executed because xip_udelay() will always return with the 943 * same chip state as it was entered with. This is why there is no care for 944 * the presence of add_wait_queue() or schedule() calls from within a couple 945 * xip_disable()'d areas of code, like in do_erase_oneblock for example. 946 * The queueing and scheduling are always happening within xip_udelay(). 947 * 948 * Similarly, get_chip() and put_chip() just happen to always be executed 949 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state 950 * is in array mode, therefore never executing many cases therein and not 951 * causing any problem with XIP. 952 */ 953 954 #else 955 956 #define xip_disable(map, chip, adr) 957 #define xip_enable(map, chip, adr) 958 #define XIP_INVAL_CACHED_RANGE(x...) 959 960 #define UDELAY(map, chip, adr, usec) \ 961 do { \ 962 mutex_unlock(&chip->mutex); \ 963 cfi_udelay(usec); \ 964 mutex_lock(&chip->mutex); \ 965 } while (0) 966 967 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ 968 do { \ 969 mutex_unlock(&chip->mutex); \ 970 INVALIDATE_CACHED_RANGE(map, adr, len); \ 971 cfi_udelay(usec); \ 972 mutex_lock(&chip->mutex); \ 973 } while (0) 974 975 #endif 976 977 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) 978 { 979 unsigned long cmd_addr; 980 struct cfi_private *cfi = map->fldrv_priv; 981 int ret; 982 983 adr += chip->start; 984 985 /* Ensure cmd read/writes are aligned. */ 986 cmd_addr = adr & ~(map_bankwidth(map)-1); 987 988 mutex_lock(&chip->mutex); 989 ret = get_chip(map, chip, cmd_addr, FL_READY); 990 if (ret) { 991 mutex_unlock(&chip->mutex); 992 return ret; 993 } 994 995 if (chip->state != FL_POINT && chip->state != FL_READY) { 996 map_write(map, CMD(0xf0), cmd_addr); 997 chip->state = FL_READY; 998 } 999 1000 map_copy_from(map, buf, adr, len); 1001 1002 put_chip(map, chip, cmd_addr); 1003 1004 mutex_unlock(&chip->mutex); 1005 return 0; 1006 } 1007 1008 1009 static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) 1010 { 1011 struct map_info *map = mtd->priv; 1012 struct cfi_private *cfi = map->fldrv_priv; 1013 unsigned long ofs; 1014 int chipnum; 1015 int ret = 0; 1016 1017 /* ofs: offset within the first chip that the first read should start */ 1018 chipnum = (from >> cfi->chipshift); 1019 ofs = from - (chipnum << cfi->chipshift); 1020 1021 while (len) { 1022 unsigned long thislen; 1023 1024 if (chipnum >= cfi->numchips) 1025 break; 1026 1027 if ((len + ofs -1) >> cfi->chipshift) 1028 thislen = (1<<cfi->chipshift) - ofs; 1029 else 1030 thislen = len; 1031 1032 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf); 1033 if (ret) 1034 break; 1035 1036 *retlen += thislen; 1037 len -= thislen; 1038 buf += thislen; 1039 1040 ofs = 0; 1041 chipnum++; 1042 } 1043 return ret; 1044 } 1045 1046 1047 static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) 1048 { 1049 DECLARE_WAITQUEUE(wait, current); 1050 unsigned long timeo = jiffies + HZ; 1051 struct cfi_private *cfi = map->fldrv_priv; 1052 1053 retry: 1054 mutex_lock(&chip->mutex); 1055 1056 if (chip->state != FL_READY){ 1057 set_current_state(TASK_UNINTERRUPTIBLE); 1058 add_wait_queue(&chip->wq, &wait); 1059 1060 mutex_unlock(&chip->mutex); 1061 1062 schedule(); 1063 remove_wait_queue(&chip->wq, &wait); 1064 timeo = jiffies + HZ; 1065 1066 goto retry; 1067 } 1068 1069 adr += chip->start; 1070 1071 chip->state = FL_READY; 1072 1073 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1074 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1075 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1076 1077 map_copy_from(map, buf, adr, len); 1078 1079 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1080 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1081 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1082 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1083 1084 wake_up(&chip->wq); 1085 mutex_unlock(&chip->mutex); 1086 1087 return 0; 1088 } 1089 1090 static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) 1091 { 1092 struct map_info *map = mtd->priv; 1093 struct cfi_private *cfi = map->fldrv_priv; 1094 unsigned long ofs; 1095 int chipnum; 1096 int ret = 0; 1097 1098 /* ofs: offset within the first chip that the first read should start */ 1099 /* 8 secsi bytes per chip */ 1100 chipnum=from>>3; 1101 ofs=from & 7; 1102 1103 while (len) { 1104 unsigned long thislen; 1105 1106 if (chipnum >= cfi->numchips) 1107 break; 1108 1109 if ((len + ofs -1) >> 3) 1110 thislen = (1<<3) - ofs; 1111 else 1112 thislen = len; 1113 1114 ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf); 1115 if (ret) 1116 break; 1117 1118 *retlen += thislen; 1119 len -= thislen; 1120 buf += thislen; 1121 1122 ofs = 0; 1123 chipnum++; 1124 } 1125 return ret; 1126 } 1127 1128 1129 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum) 1130 { 1131 struct cfi_private *cfi = map->fldrv_priv; 1132 unsigned long timeo = jiffies + HZ; 1133 /* 1134 * We use a 1ms + 1 jiffies generic timeout for writes (most devices 1135 * have a max write time of a few hundreds usec). However, we should 1136 * use the maximum timeout value given by the chip at probe time 1137 * instead. Unfortunately, struct flchip does have a field for 1138 * maximum timeout, only for typical which can be far too short 1139 * depending of the conditions. The ' + 1' is to avoid having a 1140 * timeout of 0 jiffies if HZ is smaller than 1000. 1141 */ 1142 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1; 1143 int ret = 0; 1144 map_word oldd; 1145 int retry_cnt = 0; 1146 1147 adr += chip->start; 1148 1149 mutex_lock(&chip->mutex); 1150 ret = get_chip(map, chip, adr, FL_WRITING); 1151 if (ret) { 1152 mutex_unlock(&chip->mutex); 1153 return ret; 1154 } 1155 1156 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 1157 __func__, adr, datum.x[0] ); 1158 1159 /* 1160 * Check for a NOP for the case when the datum to write is already 1161 * present - it saves time and works around buggy chips that corrupt 1162 * data at other locations when 0xff is written to a location that 1163 * already contains 0xff. 1164 */ 1165 oldd = map_read(map, adr); 1166 if (map_word_equal(map, oldd, datum)) { 1167 pr_debug("MTD %s(): NOP\n", 1168 __func__); 1169 goto op_done; 1170 } 1171 1172 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map)); 1173 ENABLE_VPP(map); 1174 xip_disable(map, chip, adr); 1175 retry: 1176 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1177 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1178 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1179 map_write(map, datum, adr); 1180 chip->state = FL_WRITING; 1181 1182 INVALIDATE_CACHE_UDELAY(map, chip, 1183 adr, map_bankwidth(map), 1184 chip->word_write_time); 1185 1186 /* See comment above for timeout value. */ 1187 timeo = jiffies + uWriteTimeout; 1188 for (;;) { 1189 if (chip->state != FL_WRITING) { 1190 /* Someone's suspended the write. Sleep */ 1191 DECLARE_WAITQUEUE(wait, current); 1192 1193 set_current_state(TASK_UNINTERRUPTIBLE); 1194 add_wait_queue(&chip->wq, &wait); 1195 mutex_unlock(&chip->mutex); 1196 schedule(); 1197 remove_wait_queue(&chip->wq, &wait); 1198 timeo = jiffies + (HZ / 2); /* FIXME */ 1199 mutex_lock(&chip->mutex); 1200 continue; 1201 } 1202 1203 if (time_after(jiffies, timeo) && !chip_ready(map, adr)){ 1204 xip_enable(map, chip, adr); 1205 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__); 1206 xip_disable(map, chip, adr); 1207 break; 1208 } 1209 1210 if (chip_ready(map, adr)) 1211 break; 1212 1213 /* Latency issues. Drop the lock, wait a while and retry */ 1214 UDELAY(map, chip, adr, 1); 1215 } 1216 /* Did we succeed? */ 1217 if (!chip_good(map, adr, datum)) { 1218 /* reset on all failures. */ 1219 map_write( map, CMD(0xF0), chip->start ); 1220 /* FIXME - should have reset delay before continuing */ 1221 1222 if (++retry_cnt <= MAX_WORD_RETRIES) 1223 goto retry; 1224 1225 ret = -EIO; 1226 } 1227 xip_enable(map, chip, adr); 1228 op_done: 1229 chip->state = FL_READY; 1230 DISABLE_VPP(map); 1231 put_chip(map, chip, adr); 1232 mutex_unlock(&chip->mutex); 1233 1234 return ret; 1235 } 1236 1237 1238 static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, 1239 size_t *retlen, const u_char *buf) 1240 { 1241 struct map_info *map = mtd->priv; 1242 struct cfi_private *cfi = map->fldrv_priv; 1243 int ret = 0; 1244 int chipnum; 1245 unsigned long ofs, chipstart; 1246 DECLARE_WAITQUEUE(wait, current); 1247 1248 chipnum = to >> cfi->chipshift; 1249 ofs = to - (chipnum << cfi->chipshift); 1250 chipstart = cfi->chips[chipnum].start; 1251 1252 /* If it's not bus-aligned, do the first byte write */ 1253 if (ofs & (map_bankwidth(map)-1)) { 1254 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1); 1255 int i = ofs - bus_ofs; 1256 int n = 0; 1257 map_word tmp_buf; 1258 1259 retry: 1260 mutex_lock(&cfi->chips[chipnum].mutex); 1261 1262 if (cfi->chips[chipnum].state != FL_READY) { 1263 set_current_state(TASK_UNINTERRUPTIBLE); 1264 add_wait_queue(&cfi->chips[chipnum].wq, &wait); 1265 1266 mutex_unlock(&cfi->chips[chipnum].mutex); 1267 1268 schedule(); 1269 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); 1270 goto retry; 1271 } 1272 1273 /* Load 'tmp_buf' with old contents of flash */ 1274 tmp_buf = map_read(map, bus_ofs+chipstart); 1275 1276 mutex_unlock(&cfi->chips[chipnum].mutex); 1277 1278 /* Number of bytes to copy from buffer */ 1279 n = min_t(int, len, map_bankwidth(map)-i); 1280 1281 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); 1282 1283 ret = do_write_oneword(map, &cfi->chips[chipnum], 1284 bus_ofs, tmp_buf); 1285 if (ret) 1286 return ret; 1287 1288 ofs += n; 1289 buf += n; 1290 (*retlen) += n; 1291 len -= n; 1292 1293 if (ofs >> cfi->chipshift) { 1294 chipnum ++; 1295 ofs = 0; 1296 if (chipnum == cfi->numchips) 1297 return 0; 1298 } 1299 } 1300 1301 /* We are now aligned, write as much as possible */ 1302 while(len >= map_bankwidth(map)) { 1303 map_word datum; 1304 1305 datum = map_word_load(map, buf); 1306 1307 ret = do_write_oneword(map, &cfi->chips[chipnum], 1308 ofs, datum); 1309 if (ret) 1310 return ret; 1311 1312 ofs += map_bankwidth(map); 1313 buf += map_bankwidth(map); 1314 (*retlen) += map_bankwidth(map); 1315 len -= map_bankwidth(map); 1316 1317 if (ofs >> cfi->chipshift) { 1318 chipnum ++; 1319 ofs = 0; 1320 if (chipnum == cfi->numchips) 1321 return 0; 1322 chipstart = cfi->chips[chipnum].start; 1323 } 1324 } 1325 1326 /* Write the trailing bytes if any */ 1327 if (len & (map_bankwidth(map)-1)) { 1328 map_word tmp_buf; 1329 1330 retry1: 1331 mutex_lock(&cfi->chips[chipnum].mutex); 1332 1333 if (cfi->chips[chipnum].state != FL_READY) { 1334 set_current_state(TASK_UNINTERRUPTIBLE); 1335 add_wait_queue(&cfi->chips[chipnum].wq, &wait); 1336 1337 mutex_unlock(&cfi->chips[chipnum].mutex); 1338 1339 schedule(); 1340 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); 1341 goto retry1; 1342 } 1343 1344 tmp_buf = map_read(map, ofs + chipstart); 1345 1346 mutex_unlock(&cfi->chips[chipnum].mutex); 1347 1348 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); 1349 1350 ret = do_write_oneword(map, &cfi->chips[chipnum], 1351 ofs, tmp_buf); 1352 if (ret) 1353 return ret; 1354 1355 (*retlen) += len; 1356 } 1357 1358 return 0; 1359 } 1360 1361 1362 /* 1363 * FIXME: interleaved mode not tested, and probably not supported! 1364 */ 1365 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, 1366 unsigned long adr, const u_char *buf, 1367 int len) 1368 { 1369 struct cfi_private *cfi = map->fldrv_priv; 1370 unsigned long timeo = jiffies + HZ; 1371 /* see comments in do_write_oneword() regarding uWriteTimeo. */ 1372 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1; 1373 int ret = -EIO; 1374 unsigned long cmd_adr; 1375 int z, words; 1376 map_word datum; 1377 1378 adr += chip->start; 1379 cmd_adr = adr; 1380 1381 mutex_lock(&chip->mutex); 1382 ret = get_chip(map, chip, adr, FL_WRITING); 1383 if (ret) { 1384 mutex_unlock(&chip->mutex); 1385 return ret; 1386 } 1387 1388 datum = map_word_load(map, buf); 1389 1390 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 1391 __func__, adr, datum.x[0] ); 1392 1393 XIP_INVAL_CACHED_RANGE(map, adr, len); 1394 ENABLE_VPP(map); 1395 xip_disable(map, chip, cmd_adr); 1396 1397 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1398 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1399 1400 /* Write Buffer Load */ 1401 map_write(map, CMD(0x25), cmd_adr); 1402 1403 chip->state = FL_WRITING_TO_BUFFER; 1404 1405 /* Write length of data to come */ 1406 words = len / map_bankwidth(map); 1407 map_write(map, CMD(words - 1), cmd_adr); 1408 /* Write data */ 1409 z = 0; 1410 while(z < words * map_bankwidth(map)) { 1411 datum = map_word_load(map, buf); 1412 map_write(map, datum, adr + z); 1413 1414 z += map_bankwidth(map); 1415 buf += map_bankwidth(map); 1416 } 1417 z -= map_bankwidth(map); 1418 1419 adr += z; 1420 1421 /* Write Buffer Program Confirm: GO GO GO */ 1422 map_write(map, CMD(0x29), cmd_adr); 1423 chip->state = FL_WRITING; 1424 1425 INVALIDATE_CACHE_UDELAY(map, chip, 1426 adr, map_bankwidth(map), 1427 chip->word_write_time); 1428 1429 timeo = jiffies + uWriteTimeout; 1430 1431 for (;;) { 1432 if (chip->state != FL_WRITING) { 1433 /* Someone's suspended the write. Sleep */ 1434 DECLARE_WAITQUEUE(wait, current); 1435 1436 set_current_state(TASK_UNINTERRUPTIBLE); 1437 add_wait_queue(&chip->wq, &wait); 1438 mutex_unlock(&chip->mutex); 1439 schedule(); 1440 remove_wait_queue(&chip->wq, &wait); 1441 timeo = jiffies + (HZ / 2); /* FIXME */ 1442 mutex_lock(&chip->mutex); 1443 continue; 1444 } 1445 1446 if (time_after(jiffies, timeo) && !chip_ready(map, adr)) 1447 break; 1448 1449 if (chip_ready(map, adr)) { 1450 xip_enable(map, chip, adr); 1451 goto op_done; 1452 } 1453 1454 /* Latency issues. Drop the lock, wait a while and retry */ 1455 UDELAY(map, chip, adr, 1); 1456 } 1457 1458 /* reset on all failures. */ 1459 map_write( map, CMD(0xF0), chip->start ); 1460 xip_enable(map, chip, adr); 1461 /* FIXME - should have reset delay before continuing */ 1462 1463 printk(KERN_WARNING "MTD %s(): software timeout\n", 1464 __func__ ); 1465 1466 ret = -EIO; 1467 op_done: 1468 chip->state = FL_READY; 1469 DISABLE_VPP(map); 1470 put_chip(map, chip, adr); 1471 mutex_unlock(&chip->mutex); 1472 1473 return ret; 1474 } 1475 1476 1477 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, 1478 size_t *retlen, const u_char *buf) 1479 { 1480 struct map_info *map = mtd->priv; 1481 struct cfi_private *cfi = map->fldrv_priv; 1482 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 1483 int ret = 0; 1484 int chipnum; 1485 unsigned long ofs; 1486 1487 chipnum = to >> cfi->chipshift; 1488 ofs = to - (chipnum << cfi->chipshift); 1489 1490 /* If it's not bus-aligned, do the first word write */ 1491 if (ofs & (map_bankwidth(map)-1)) { 1492 size_t local_len = (-ofs)&(map_bankwidth(map)-1); 1493 if (local_len > len) 1494 local_len = len; 1495 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), 1496 local_len, retlen, buf); 1497 if (ret) 1498 return ret; 1499 ofs += local_len; 1500 buf += local_len; 1501 len -= local_len; 1502 1503 if (ofs >> cfi->chipshift) { 1504 chipnum ++; 1505 ofs = 0; 1506 if (chipnum == cfi->numchips) 1507 return 0; 1508 } 1509 } 1510 1511 /* Write buffer is worth it only if more than one word to write... */ 1512 while (len >= map_bankwidth(map) * 2) { 1513 /* We must not cross write block boundaries */ 1514 int size = wbufsize - (ofs & (wbufsize-1)); 1515 1516 if (size > len) 1517 size = len; 1518 if (size % map_bankwidth(map)) 1519 size -= size % map_bankwidth(map); 1520 1521 ret = do_write_buffer(map, &cfi->chips[chipnum], 1522 ofs, buf, size); 1523 if (ret) 1524 return ret; 1525 1526 ofs += size; 1527 buf += size; 1528 (*retlen) += size; 1529 len -= size; 1530 1531 if (ofs >> cfi->chipshift) { 1532 chipnum ++; 1533 ofs = 0; 1534 if (chipnum == cfi->numchips) 1535 return 0; 1536 } 1537 } 1538 1539 if (len) { 1540 size_t retlen_dregs = 0; 1541 1542 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), 1543 len, &retlen_dregs, buf); 1544 1545 *retlen += retlen_dregs; 1546 return ret; 1547 } 1548 1549 return 0; 1550 } 1551 1552 /* 1553 * Wait for the flash chip to become ready to write data 1554 * 1555 * This is only called during the panic_write() path. When panic_write() 1556 * is called, the kernel is in the process of a panic, and will soon be 1557 * dead. Therefore we don't take any locks, and attempt to get access 1558 * to the chip as soon as possible. 1559 */ 1560 static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip, 1561 unsigned long adr) 1562 { 1563 struct cfi_private *cfi = map->fldrv_priv; 1564 int retries = 10; 1565 int i; 1566 1567 /* 1568 * If the driver thinks the chip is idle, and no toggle bits 1569 * are changing, then the chip is actually idle for sure. 1570 */ 1571 if (chip->state == FL_READY && chip_ready(map, adr)) 1572 return 0; 1573 1574 /* 1575 * Try several times to reset the chip and then wait for it 1576 * to become idle. The upper limit of a few milliseconds of 1577 * delay isn't a big problem: the kernel is dying anyway. It 1578 * is more important to save the messages. 1579 */ 1580 while (retries > 0) { 1581 const unsigned long timeo = (HZ / 1000) + 1; 1582 1583 /* send the reset command */ 1584 map_write(map, CMD(0xF0), chip->start); 1585 1586 /* wait for the chip to become ready */ 1587 for (i = 0; i < jiffies_to_usecs(timeo); i++) { 1588 if (chip_ready(map, adr)) 1589 return 0; 1590 1591 udelay(1); 1592 } 1593 } 1594 1595 /* the chip never became ready */ 1596 return -EBUSY; 1597 } 1598 1599 /* 1600 * Write out one word of data to a single flash chip during a kernel panic 1601 * 1602 * This is only called during the panic_write() path. When panic_write() 1603 * is called, the kernel is in the process of a panic, and will soon be 1604 * dead. Therefore we don't take any locks, and attempt to get access 1605 * to the chip as soon as possible. 1606 * 1607 * The implementation of this routine is intentionally similar to 1608 * do_write_oneword(), in order to ease code maintenance. 1609 */ 1610 static int do_panic_write_oneword(struct map_info *map, struct flchip *chip, 1611 unsigned long adr, map_word datum) 1612 { 1613 const unsigned long uWriteTimeout = (HZ / 1000) + 1; 1614 struct cfi_private *cfi = map->fldrv_priv; 1615 int retry_cnt = 0; 1616 map_word oldd; 1617 int ret = 0; 1618 int i; 1619 1620 adr += chip->start; 1621 1622 ret = cfi_amdstd_panic_wait(map, chip, adr); 1623 if (ret) 1624 return ret; 1625 1626 pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n", 1627 __func__, adr, datum.x[0]); 1628 1629 /* 1630 * Check for a NOP for the case when the datum to write is already 1631 * present - it saves time and works around buggy chips that corrupt 1632 * data at other locations when 0xff is written to a location that 1633 * already contains 0xff. 1634 */ 1635 oldd = map_read(map, adr); 1636 if (map_word_equal(map, oldd, datum)) { 1637 pr_debug("MTD %s(): NOP\n", __func__); 1638 goto op_done; 1639 } 1640 1641 ENABLE_VPP(map); 1642 1643 retry: 1644 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1645 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1646 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1647 map_write(map, datum, adr); 1648 1649 for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) { 1650 if (chip_ready(map, adr)) 1651 break; 1652 1653 udelay(1); 1654 } 1655 1656 if (!chip_good(map, adr, datum)) { 1657 /* reset on all failures. */ 1658 map_write(map, CMD(0xF0), chip->start); 1659 /* FIXME - should have reset delay before continuing */ 1660 1661 if (++retry_cnt <= MAX_WORD_RETRIES) 1662 goto retry; 1663 1664 ret = -EIO; 1665 } 1666 1667 op_done: 1668 DISABLE_VPP(map); 1669 return ret; 1670 } 1671 1672 /* 1673 * Write out some data during a kernel panic 1674 * 1675 * This is used by the mtdoops driver to save the dying messages from a 1676 * kernel which has panic'd. 1677 * 1678 * This routine ignores all of the locking used throughout the rest of the 1679 * driver, in order to ensure that the data gets written out no matter what 1680 * state this driver (and the flash chip itself) was in when the kernel crashed. 1681 * 1682 * The implementation of this routine is intentionally similar to 1683 * cfi_amdstd_write_words(), in order to ease code maintenance. 1684 */ 1685 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, 1686 size_t *retlen, const u_char *buf) 1687 { 1688 struct map_info *map = mtd->priv; 1689 struct cfi_private *cfi = map->fldrv_priv; 1690 unsigned long ofs, chipstart; 1691 int ret = 0; 1692 int chipnum; 1693 1694 chipnum = to >> cfi->chipshift; 1695 ofs = to - (chipnum << cfi->chipshift); 1696 chipstart = cfi->chips[chipnum].start; 1697 1698 /* If it's not bus aligned, do the first byte write */ 1699 if (ofs & (map_bankwidth(map) - 1)) { 1700 unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1); 1701 int i = ofs - bus_ofs; 1702 int n = 0; 1703 map_word tmp_buf; 1704 1705 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs); 1706 if (ret) 1707 return ret; 1708 1709 /* Load 'tmp_buf' with old contents of flash */ 1710 tmp_buf = map_read(map, bus_ofs + chipstart); 1711 1712 /* Number of bytes to copy from buffer */ 1713 n = min_t(int, len, map_bankwidth(map) - i); 1714 1715 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); 1716 1717 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 1718 bus_ofs, tmp_buf); 1719 if (ret) 1720 return ret; 1721 1722 ofs += n; 1723 buf += n; 1724 (*retlen) += n; 1725 len -= n; 1726 1727 if (ofs >> cfi->chipshift) { 1728 chipnum++; 1729 ofs = 0; 1730 if (chipnum == cfi->numchips) 1731 return 0; 1732 } 1733 } 1734 1735 /* We are now aligned, write as much as possible */ 1736 while (len >= map_bankwidth(map)) { 1737 map_word datum; 1738 1739 datum = map_word_load(map, buf); 1740 1741 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 1742 ofs, datum); 1743 if (ret) 1744 return ret; 1745 1746 ofs += map_bankwidth(map); 1747 buf += map_bankwidth(map); 1748 (*retlen) += map_bankwidth(map); 1749 len -= map_bankwidth(map); 1750 1751 if (ofs >> cfi->chipshift) { 1752 chipnum++; 1753 ofs = 0; 1754 if (chipnum == cfi->numchips) 1755 return 0; 1756 1757 chipstart = cfi->chips[chipnum].start; 1758 } 1759 } 1760 1761 /* Write the trailing bytes if any */ 1762 if (len & (map_bankwidth(map) - 1)) { 1763 map_word tmp_buf; 1764 1765 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs); 1766 if (ret) 1767 return ret; 1768 1769 tmp_buf = map_read(map, ofs + chipstart); 1770 1771 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); 1772 1773 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 1774 ofs, tmp_buf); 1775 if (ret) 1776 return ret; 1777 1778 (*retlen) += len; 1779 } 1780 1781 return 0; 1782 } 1783 1784 1785 /* 1786 * Handle devices with one erase region, that only implement 1787 * the chip erase command. 1788 */ 1789 static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) 1790 { 1791 struct cfi_private *cfi = map->fldrv_priv; 1792 unsigned long timeo = jiffies + HZ; 1793 unsigned long int adr; 1794 DECLARE_WAITQUEUE(wait, current); 1795 int ret = 0; 1796 1797 adr = cfi->addr_unlock1; 1798 1799 mutex_lock(&chip->mutex); 1800 ret = get_chip(map, chip, adr, FL_WRITING); 1801 if (ret) { 1802 mutex_unlock(&chip->mutex); 1803 return ret; 1804 } 1805 1806 pr_debug("MTD %s(): ERASE 0x%.8lx\n", 1807 __func__, chip->start ); 1808 1809 XIP_INVAL_CACHED_RANGE(map, adr, map->size); 1810 ENABLE_VPP(map); 1811 xip_disable(map, chip, adr); 1812 1813 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1814 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1815 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1816 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1817 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1818 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1819 1820 chip->state = FL_ERASING; 1821 chip->erase_suspended = 0; 1822 chip->in_progress_block_addr = adr; 1823 1824 INVALIDATE_CACHE_UDELAY(map, chip, 1825 adr, map->size, 1826 chip->erase_time*500); 1827 1828 timeo = jiffies + (HZ*20); 1829 1830 for (;;) { 1831 if (chip->state != FL_ERASING) { 1832 /* Someone's suspended the erase. Sleep */ 1833 set_current_state(TASK_UNINTERRUPTIBLE); 1834 add_wait_queue(&chip->wq, &wait); 1835 mutex_unlock(&chip->mutex); 1836 schedule(); 1837 remove_wait_queue(&chip->wq, &wait); 1838 mutex_lock(&chip->mutex); 1839 continue; 1840 } 1841 if (chip->erase_suspended) { 1842 /* This erase was suspended and resumed. 1843 Adjust the timeout */ 1844 timeo = jiffies + (HZ*20); /* FIXME */ 1845 chip->erase_suspended = 0; 1846 } 1847 1848 if (chip_ready(map, adr)) 1849 break; 1850 1851 if (time_after(jiffies, timeo)) { 1852 printk(KERN_WARNING "MTD %s(): software timeout\n", 1853 __func__ ); 1854 break; 1855 } 1856 1857 /* Latency issues. Drop the lock, wait a while and retry */ 1858 UDELAY(map, chip, adr, 1000000/HZ); 1859 } 1860 /* Did we succeed? */ 1861 if (!chip_good(map, adr, map_word_ff(map))) { 1862 /* reset on all failures. */ 1863 map_write( map, CMD(0xF0), chip->start ); 1864 /* FIXME - should have reset delay before continuing */ 1865 1866 ret = -EIO; 1867 } 1868 1869 chip->state = FL_READY; 1870 xip_enable(map, chip, adr); 1871 DISABLE_VPP(map); 1872 put_chip(map, chip, adr); 1873 mutex_unlock(&chip->mutex); 1874 1875 return ret; 1876 } 1877 1878 1879 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk) 1880 { 1881 struct cfi_private *cfi = map->fldrv_priv; 1882 unsigned long timeo = jiffies + HZ; 1883 DECLARE_WAITQUEUE(wait, current); 1884 int ret = 0; 1885 1886 adr += chip->start; 1887 1888 mutex_lock(&chip->mutex); 1889 ret = get_chip(map, chip, adr, FL_ERASING); 1890 if (ret) { 1891 mutex_unlock(&chip->mutex); 1892 return ret; 1893 } 1894 1895 pr_debug("MTD %s(): ERASE 0x%.8lx\n", 1896 __func__, adr ); 1897 1898 XIP_INVAL_CACHED_RANGE(map, adr, len); 1899 ENABLE_VPP(map); 1900 xip_disable(map, chip, adr); 1901 1902 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1903 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1904 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1905 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1906 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1907 map_write(map, cfi->sector_erase_cmd, adr); 1908 1909 chip->state = FL_ERASING; 1910 chip->erase_suspended = 0; 1911 chip->in_progress_block_addr = adr; 1912 1913 INVALIDATE_CACHE_UDELAY(map, chip, 1914 adr, len, 1915 chip->erase_time*500); 1916 1917 timeo = jiffies + (HZ*20); 1918 1919 for (;;) { 1920 if (chip->state != FL_ERASING) { 1921 /* Someone's suspended the erase. Sleep */ 1922 set_current_state(TASK_UNINTERRUPTIBLE); 1923 add_wait_queue(&chip->wq, &wait); 1924 mutex_unlock(&chip->mutex); 1925 schedule(); 1926 remove_wait_queue(&chip->wq, &wait); 1927 mutex_lock(&chip->mutex); 1928 continue; 1929 } 1930 if (chip->erase_suspended) { 1931 /* This erase was suspended and resumed. 1932 Adjust the timeout */ 1933 timeo = jiffies + (HZ*20); /* FIXME */ 1934 chip->erase_suspended = 0; 1935 } 1936 1937 if (chip_ready(map, adr)) { 1938 xip_enable(map, chip, adr); 1939 break; 1940 } 1941 1942 if (time_after(jiffies, timeo)) { 1943 xip_enable(map, chip, adr); 1944 printk(KERN_WARNING "MTD %s(): software timeout\n", 1945 __func__ ); 1946 break; 1947 } 1948 1949 /* Latency issues. Drop the lock, wait a while and retry */ 1950 UDELAY(map, chip, adr, 1000000/HZ); 1951 } 1952 /* Did we succeed? */ 1953 if (!chip_good(map, adr, map_word_ff(map))) { 1954 /* reset on all failures. */ 1955 map_write( map, CMD(0xF0), chip->start ); 1956 /* FIXME - should have reset delay before continuing */ 1957 1958 ret = -EIO; 1959 } 1960 1961 chip->state = FL_READY; 1962 DISABLE_VPP(map); 1963 put_chip(map, chip, adr); 1964 mutex_unlock(&chip->mutex); 1965 return ret; 1966 } 1967 1968 1969 static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) 1970 { 1971 unsigned long ofs, len; 1972 int ret; 1973 1974 ofs = instr->addr; 1975 len = instr->len; 1976 1977 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL); 1978 if (ret) 1979 return ret; 1980 1981 instr->state = MTD_ERASE_DONE; 1982 mtd_erase_callback(instr); 1983 1984 return 0; 1985 } 1986 1987 1988 static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr) 1989 { 1990 struct map_info *map = mtd->priv; 1991 struct cfi_private *cfi = map->fldrv_priv; 1992 int ret = 0; 1993 1994 if (instr->addr != 0) 1995 return -EINVAL; 1996 1997 if (instr->len != mtd->size) 1998 return -EINVAL; 1999 2000 ret = do_erase_chip(map, &cfi->chips[0]); 2001 if (ret) 2002 return ret; 2003 2004 instr->state = MTD_ERASE_DONE; 2005 mtd_erase_callback(instr); 2006 2007 return 0; 2008 } 2009 2010 static int do_atmel_lock(struct map_info *map, struct flchip *chip, 2011 unsigned long adr, int len, void *thunk) 2012 { 2013 struct cfi_private *cfi = map->fldrv_priv; 2014 int ret; 2015 2016 mutex_lock(&chip->mutex); 2017 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); 2018 if (ret) 2019 goto out_unlock; 2020 chip->state = FL_LOCKING; 2021 2022 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); 2023 2024 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2025 cfi->device_type, NULL); 2026 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2027 cfi->device_type, NULL); 2028 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, 2029 cfi->device_type, NULL); 2030 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2031 cfi->device_type, NULL); 2032 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2033 cfi->device_type, NULL); 2034 map_write(map, CMD(0x40), chip->start + adr); 2035 2036 chip->state = FL_READY; 2037 put_chip(map, chip, adr + chip->start); 2038 ret = 0; 2039 2040 out_unlock: 2041 mutex_unlock(&chip->mutex); 2042 return ret; 2043 } 2044 2045 static int do_atmel_unlock(struct map_info *map, struct flchip *chip, 2046 unsigned long adr, int len, void *thunk) 2047 { 2048 struct cfi_private *cfi = map->fldrv_priv; 2049 int ret; 2050 2051 mutex_lock(&chip->mutex); 2052 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING); 2053 if (ret) 2054 goto out_unlock; 2055 chip->state = FL_UNLOCKING; 2056 2057 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); 2058 2059 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2060 cfi->device_type, NULL); 2061 map_write(map, CMD(0x70), adr); 2062 2063 chip->state = FL_READY; 2064 put_chip(map, chip, adr + chip->start); 2065 ret = 0; 2066 2067 out_unlock: 2068 mutex_unlock(&chip->mutex); 2069 return ret; 2070 } 2071 2072 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 2073 { 2074 return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL); 2075 } 2076 2077 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 2078 { 2079 return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL); 2080 } 2081 2082 2083 static void cfi_amdstd_sync (struct mtd_info *mtd) 2084 { 2085 struct map_info *map = mtd->priv; 2086 struct cfi_private *cfi = map->fldrv_priv; 2087 int i; 2088 struct flchip *chip; 2089 int ret = 0; 2090 DECLARE_WAITQUEUE(wait, current); 2091 2092 for (i=0; !ret && i<cfi->numchips; i++) { 2093 chip = &cfi->chips[i]; 2094 2095 retry: 2096 mutex_lock(&chip->mutex); 2097 2098 switch(chip->state) { 2099 case FL_READY: 2100 case FL_STATUS: 2101 case FL_CFI_QUERY: 2102 case FL_JEDEC_QUERY: 2103 chip->oldstate = chip->state; 2104 chip->state = FL_SYNCING; 2105 /* No need to wake_up() on this state change - 2106 * as the whole point is that nobody can do anything 2107 * with the chip now anyway. 2108 */ 2109 case FL_SYNCING: 2110 mutex_unlock(&chip->mutex); 2111 break; 2112 2113 default: 2114 /* Not an idle state */ 2115 set_current_state(TASK_UNINTERRUPTIBLE); 2116 add_wait_queue(&chip->wq, &wait); 2117 2118 mutex_unlock(&chip->mutex); 2119 2120 schedule(); 2121 2122 remove_wait_queue(&chip->wq, &wait); 2123 2124 goto retry; 2125 } 2126 } 2127 2128 /* Unlock the chips again */ 2129 2130 for (i--; i >=0; i--) { 2131 chip = &cfi->chips[i]; 2132 2133 mutex_lock(&chip->mutex); 2134 2135 if (chip->state == FL_SYNCING) { 2136 chip->state = chip->oldstate; 2137 wake_up(&chip->wq); 2138 } 2139 mutex_unlock(&chip->mutex); 2140 } 2141 } 2142 2143 2144 static int cfi_amdstd_suspend(struct mtd_info *mtd) 2145 { 2146 struct map_info *map = mtd->priv; 2147 struct cfi_private *cfi = map->fldrv_priv; 2148 int i; 2149 struct flchip *chip; 2150 int ret = 0; 2151 2152 for (i=0; !ret && i<cfi->numchips; i++) { 2153 chip = &cfi->chips[i]; 2154 2155 mutex_lock(&chip->mutex); 2156 2157 switch(chip->state) { 2158 case FL_READY: 2159 case FL_STATUS: 2160 case FL_CFI_QUERY: 2161 case FL_JEDEC_QUERY: 2162 chip->oldstate = chip->state; 2163 chip->state = FL_PM_SUSPENDED; 2164 /* No need to wake_up() on this state change - 2165 * as the whole point is that nobody can do anything 2166 * with the chip now anyway. 2167 */ 2168 case FL_PM_SUSPENDED: 2169 break; 2170 2171 default: 2172 ret = -EAGAIN; 2173 break; 2174 } 2175 mutex_unlock(&chip->mutex); 2176 } 2177 2178 /* Unlock the chips again */ 2179 2180 if (ret) { 2181 for (i--; i >=0; i--) { 2182 chip = &cfi->chips[i]; 2183 2184 mutex_lock(&chip->mutex); 2185 2186 if (chip->state == FL_PM_SUSPENDED) { 2187 chip->state = chip->oldstate; 2188 wake_up(&chip->wq); 2189 } 2190 mutex_unlock(&chip->mutex); 2191 } 2192 } 2193 2194 return ret; 2195 } 2196 2197 2198 static void cfi_amdstd_resume(struct mtd_info *mtd) 2199 { 2200 struct map_info *map = mtd->priv; 2201 struct cfi_private *cfi = map->fldrv_priv; 2202 int i; 2203 struct flchip *chip; 2204 2205 for (i=0; i<cfi->numchips; i++) { 2206 2207 chip = &cfi->chips[i]; 2208 2209 mutex_lock(&chip->mutex); 2210 2211 if (chip->state == FL_PM_SUSPENDED) { 2212 chip->state = FL_READY; 2213 map_write(map, CMD(0xF0), chip->start); 2214 wake_up(&chip->wq); 2215 } 2216 else 2217 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n"); 2218 2219 mutex_unlock(&chip->mutex); 2220 } 2221 } 2222 2223 2224 /* 2225 * Ensure that the flash device is put back into read array mode before 2226 * unloading the driver or rebooting. On some systems, rebooting while 2227 * the flash is in query/program/erase mode will prevent the CPU from 2228 * fetching the bootloader code, requiring a hard reset or power cycle. 2229 */ 2230 static int cfi_amdstd_reset(struct mtd_info *mtd) 2231 { 2232 struct map_info *map = mtd->priv; 2233 struct cfi_private *cfi = map->fldrv_priv; 2234 int i, ret; 2235 struct flchip *chip; 2236 2237 for (i = 0; i < cfi->numchips; i++) { 2238 2239 chip = &cfi->chips[i]; 2240 2241 mutex_lock(&chip->mutex); 2242 2243 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN); 2244 if (!ret) { 2245 map_write(map, CMD(0xF0), chip->start); 2246 chip->state = FL_SHUTDOWN; 2247 put_chip(map, chip, chip->start); 2248 } 2249 2250 mutex_unlock(&chip->mutex); 2251 } 2252 2253 return 0; 2254 } 2255 2256 2257 static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val, 2258 void *v) 2259 { 2260 struct mtd_info *mtd; 2261 2262 mtd = container_of(nb, struct mtd_info, reboot_notifier); 2263 cfi_amdstd_reset(mtd); 2264 return NOTIFY_DONE; 2265 } 2266 2267 2268 static void cfi_amdstd_destroy(struct mtd_info *mtd) 2269 { 2270 struct map_info *map = mtd->priv; 2271 struct cfi_private *cfi = map->fldrv_priv; 2272 2273 cfi_amdstd_reset(mtd); 2274 unregister_reboot_notifier(&mtd->reboot_notifier); 2275 kfree(cfi->cmdset_priv); 2276 kfree(cfi->cfiq); 2277 kfree(cfi); 2278 kfree(mtd->eraseregions); 2279 } 2280 2281 MODULE_LICENSE("GPL"); 2282 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al."); 2283 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips"); 2284 MODULE_ALIAS("cfi_cmdset_0006"); 2285 MODULE_ALIAS("cfi_cmdset_0701"); 2286