/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define MANUFACTURER_AMD	0x0001
#define MANUFACTURER_ATMEL	0x001F
#define MANUFACTURER_MACRONIX	0x00C2
#define MANUFACTURER_SST	0x00BF
#define SST49LF004B		0x0060
#define SST49LF040B		0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};

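/*
 * Summary of the command sequences issued below.  Each starts with the
 * two unlock cycles (0xAA to addr_unlock1, 0x55 to addr_unlock2):
 *
 *   0xA0 + datum                   program one word
 *   0x25, count-1, data..., 0x29   write-buffer load and program confirm
 *   0x80 + unlock + 0x10           chip erase
 *   0x80 + unlock + 0x30           sector erase (0x30 written to the sector)
 *   0x88 / 0x90 + 0x00             enter / exit the SecSi region
 *   0xF0                           reset to read array mode (no unlock needed)
 *
 * 0xB0 (erase suspend) and 0x30 (erase resume) are written directly to
 * the sector being erased, without the unlock prefix.
 */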
"Not required" : "Required"); 103 104 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend)) 105 printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]); 106 else 107 printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend); 108 109 if (extp->BlkProt == 0) 110 printk(" Block protection: Not supported\n"); 111 else 112 printk(" Block protection: %d sectors per group\n", extp->BlkProt); 113 114 115 printk(" Temporary block unprotect: %s\n", 116 extp->TmpBlkUnprotect ? "Supported" : "Not supported"); 117 printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot); 118 printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps); 119 printk(" Burst mode: %s\n", 120 extp->BurstMode ? "Supported" : "Not supported"); 121 if (extp->PageMode == 0) 122 printk(" Page mode: Not supported\n"); 123 else 124 printk(" Page mode: %d word page\n", extp->PageMode << 2); 125 126 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n", 127 extp->VppMin >> 4, extp->VppMin & 0xf); 128 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n", 129 extp->VppMax >> 4, extp->VppMax & 0xf); 130 131 if (extp->TopBottom < ARRAY_SIZE(top_bottom)) 132 printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]); 133 else 134 printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom); 135 } 136 #endif 137 138 #ifdef AMD_BOOTLOC_BUG 139 /* Wheee. Bring me the head of someone at AMD. */ 140 static void fixup_amd_bootblock(struct mtd_info *mtd, void* param) 141 { 142 struct map_info *map = mtd->priv; 143 struct cfi_private *cfi = map->fldrv_priv; 144 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 145 __u8 major = extp->MajorVersion; 146 __u8 minor = extp->MinorVersion; 147 148 if (((major << 8) | minor) < 0x3131) { 149 /* CFI version 1.0 => don't trust bootloc */ 150 151 DEBUG(MTD_DEBUG_LEVEL1, 152 "%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n", 153 map->name, cfi->mfr, cfi->id); 154 155 /* AFAICS all 29LV400 with a bottom boot block have a device ID 156 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode. 157 * These were badly detected as they have the 0x80 bit set 158 * so treat them as a special case. 159 */ 160 if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) && 161 162 /* Macronix added CFI to their 2nd generation 163 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD, 164 * Fujitsu, Spansion, EON, ESI and older Macronix) 165 * has CFI. 166 * 167 * Therefore also check the manufacturer. 168 * This reduces the risk of false detection due to 169 * the 8-bit device ID. 170 */ 171 (cfi->mfr == MANUFACTURER_MACRONIX)) { 172 DEBUG(MTD_DEBUG_LEVEL1, 173 "%s: Macronix MX29LV400C with bottom boot block" 174 " detected\n", map->name); 175 extp->TopBottom = 2; /* bottom boot */ 176 } else 177 if (cfi->id & 0x80) { 178 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id); 179 extp->TopBottom = 3; /* top boot */ 180 } else { 181 extp->TopBottom = 2; /* bottom boot */ 182 } 183 184 DEBUG(MTD_DEBUG_LEVEL1, 185 "%s: AMD CFI PRI V%c.%c has no boot block field;" 186 " deduced %s from Device ID\n", map->name, major, minor, 187 extp->TopBottom == 2 ? 
"bottom" : "top"); 188 } 189 } 190 #endif 191 192 static void fixup_use_write_buffers(struct mtd_info *mtd, void *param) 193 { 194 struct map_info *map = mtd->priv; 195 struct cfi_private *cfi = map->fldrv_priv; 196 if (cfi->cfiq->BufWriteTimeoutTyp) { 197 DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" ); 198 mtd->write = cfi_amdstd_write_buffers; 199 } 200 } 201 202 /* Atmel chips don't use the same PRI format as AMD chips */ 203 static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param) 204 { 205 struct map_info *map = mtd->priv; 206 struct cfi_private *cfi = map->fldrv_priv; 207 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 208 struct cfi_pri_atmel atmel_pri; 209 210 memcpy(&atmel_pri, extp, sizeof(atmel_pri)); 211 memset((char *)extp + 5, 0, sizeof(*extp) - 5); 212 213 if (atmel_pri.Features & 0x02) 214 extp->EraseSuspend = 2; 215 216 /* Some chips got it backwards... */ 217 if (cfi->id == AT49BV6416) { 218 if (atmel_pri.BottomBoot) 219 extp->TopBottom = 3; 220 else 221 extp->TopBottom = 2; 222 } else { 223 if (atmel_pri.BottomBoot) 224 extp->TopBottom = 2; 225 else 226 extp->TopBottom = 3; 227 } 228 229 /* burst write mode not supported */ 230 cfi->cfiq->BufWriteTimeoutTyp = 0; 231 cfi->cfiq->BufWriteTimeoutMax = 0; 232 } 233 234 static void fixup_use_secsi(struct mtd_info *mtd, void *param) 235 { 236 /* Setup for chips with a secsi area */ 237 mtd->read_user_prot_reg = cfi_amdstd_secsi_read; 238 mtd->read_fact_prot_reg = cfi_amdstd_secsi_read; 239 } 240 241 static void fixup_use_erase_chip(struct mtd_info *mtd, void *param) 242 { 243 struct map_info *map = mtd->priv; 244 struct cfi_private *cfi = map->fldrv_priv; 245 if ((cfi->cfiq->NumEraseRegions == 1) && 246 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) { 247 mtd->erase = cfi_amdstd_erase_chip; 248 } 249 250 } 251 252 /* 253 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors 254 * locked by default. 

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
{
	mtd->lock = cfi_atmel_lock;
	mtd->unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
	}
}
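/*
 * The low 16 bits of an EraseRegionInfo[] entry hold "number of sectors
 * minus one", which is the field the two fixups above patch:
 * 0x003f | 0x0040 = 0x007f takes the S29GL064N from 64 to 128 sectors,
 * and 0x007e & ~0x0040 = 0x003e takes the S29GL032N from 127 to 63.
 */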

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
	{ MANUFACTURER_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It is likely that the device IDs are
	 * as well.  This table picks all the cases where we
	 * know that is so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
	{ 0, 0, NULL, NULL }
};


static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e &&
	    extp->MajorVersion == '0')
		extp->MajorVersion = '1';
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		unsigned char bootloc;
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure.  So we read the feature
		 * table from it.
		 */
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		cfi_fixup_major_minor(cfi, extp);

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
			printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
			       "version %c.%c.\n", extp->MajorVersion,
			       extp->MinorVersion);
			kfree(extp);
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		/* Apply cfi device specific fixups */
		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		bootloc = extp->TopBottom;
		if ((bootloc != 2) && (bootloc != 3)) {
			printk(KERN_WARNING "%s: CFI does not contain boot "
			       "bank location. Assuming bottom.\n", map->name);
			bootloc = 2;
		}

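		/*
		 * On top boot devices with these broken CFI tables the
		 * erase regions are apparently listed in reverse address
		 * order, so reverse EraseRegionInfo[] here to put them
		 * back into ascending address order.
		 */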
		if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
			printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);

			for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
				int j = (cfi->cfiq->NumEraseRegions-1)-i;
				__u32 swap;

				swap = cfi->cfiq->EraseRegionInfo[i];
				cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
				cfi->cfiq->EraseRegionInfo[j] = swap;
			}
		}
		/* Set the default CFI lock/unlock addresses */
		cfi->addr_unlock1 = 0x555;
		cfi->addr_unlock2 = 0x2aa;

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i < cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i, j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI) ? "CFI" : "JEDEC", cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

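	/*
	 * Each EraseRegionInfo[] entry encodes one region's geometry per
	 * the CFI spec: bits 31:16 hold the block size in 256-byte units
	 * and bits 15:0 hold the number of blocks minus one.  E.g.
	 * 0x01000003 means four 64KiB blocks: ((0x01000003 >> 8) & ~0xff)
	 * = 0x10000 bytes, and (0x01000003 & 0xffff) + 1 = 4 blocks.
	 */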
	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}
#if 0
	// debug
	for (i=0; i<mtd->numeraseregions; i++) {
		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i, mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}
#endif

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if (mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 *
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		      (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased.  However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we are
				 * trying to use the erase-in-progress sector.
				 */
				map_write(map, CMD(0x30), chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}

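/*
 * get_chip()/put_chip() bracket every flash access in this driver: with
 * chip->mutex held, get_chip() waits (possibly sleeping) until the chip
 * can service the requested mode, suspending an in-progress erase when
 * the chip supports it; put_chip() undoes this, resuming any suspended
 * erase and waking up other waiters.
 */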

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			spin_unlock(chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0x30), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no need to
 * worry about the add_wait_queue() or schedule() calls within a couple of
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling always happen within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where the flash is
 * in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

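/*
 * Requests are split along chip boundaries: chipnum indexes the chip
 * holding the first byte (from >> cfi->chipshift) and ofs is the offset
 * within that chip.  Each per-chip call is clipped to the end of the
 * current chip, after which the loop continues at offset 0 of the next.
 */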

static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);


	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}


static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	spin_lock(chip->mutex);

	if (chip->state != FL_READY) {
#if 0
		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if (signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	spin_unlock(chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;


	/* ofs: offset within the first chip that the first read should start */

	/* 8 secsi bytes per chip */
	chipnum = from >> 3;
	ofs = from & 7;


	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

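/*
 * Word programming uses the standard three-cycle command sequence
 * (0xAA, 0x55, 0xA0) followed by the datum written to the target
 * address.  Completion is then detected with chip_ready()/chip_good():
 * per the toggle-bit status algorithm, DQ6 toggles on consecutive reads
 * while the embedded program algorithm is running, so two identical
 * reads that also match the expected datum mean the write is done.
 */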

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundred usec).  However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip does not have a field for
	 * the maximum timeout, only the typical one, which can be far too
	 * short depending on the conditions.  The ' + 1' is to avoid having
	 * a timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = (HZ / 1000) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	      __func__, adr, datum.x[0]);

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
		      __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}

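/*
 * The flash can only be programmed one aligned bus word at a time, so
 * an unaligned head or tail is handled by read-modify-write: read the
 * containing word, merge the caller's bytes in with
 * map_word_load_partial(), and program the merged word back.
 */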

static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if (signal_pending(current))
				return -EINTR;
#endif
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs + chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while (len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if (signal_pending(current))
				return -EINTR;
#endif
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
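/*
 * Buffered writes use the write-buffer command set: after the unlock
 * cycles, 0x25 (Write Buffer Load) is written to the sector address,
 * then the word count minus one, then the data words, and finally 0x29
 * (Write Buffer Program Confirm) to start the actual programming.
 */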
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
	unsigned long uWriteTimeout = (HZ / 1000) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	      __func__, adr, datum.x[0]);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while (z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures. */
	map_write(map, CMD(0xF0), chip->start);
	xip_enable(map, chip, adr);
	/* FIXME - should have reset delay before continuing */

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__);

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs) & (map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}


/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
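/*
 * Chip erase is the six-cycle sequence 0xAA, 0x55, 0x80, 0xAA, 0x55,
 * 0x10.  Completion is checked with chip_good() against an all-ones
 * word, since a successfully erased array reads back as 0xFF bytes.
 */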
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	      __func__, chip->start);

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	      __func__, adr);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x30), adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}

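/*
 * cfi_varsize_frob() walks the erase regions covering the requested
 * [ofs, ofs+len) range and calls the given callback (do_erase_oneblock()
 * here, and the Atmel lock/unlock helpers below) once per erase block.
 */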

static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}


static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	spin_unlock(chip->mutex);
	return ret;
}

static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}


static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			spin_unlock(chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			spin_unlock(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}


static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}


static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		spin_unlock(chip->mutex);
	}
}

static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");