/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define SST49LF004B		0x0060
#define SST49LF040B		0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t,
					  size_t *, struct otp_info *);
static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t,
					  size_t *, struct otp_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t,
					  size_t *, u_char *);
static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t,
					  size_t *, u_char *);
static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
					   size_t *, u_char *);
static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t);

static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};


/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk(" Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk(" Block protection: Not supported\n");
	else
		printk(" Block protection: %d sectors per group\n", extp->BlkProt);


	printk(" Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk(" Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk(" Page mode: Not supported\n");
	else
		printk(" Page mode: %d word page\n", extp->PageMode << 2);

	printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
		    (cfi->mfr == CFI_MFR_MACRONIX)) {
			pr_debug("%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		pr_debug("Using buffer write method\n");
		mtd->_write = cfi_amdstd_write_buffers;
	}
}

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd)
{
	/* Setup for chips with a secsi area */
	mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->_erase = cfi_amdstd_erase_chip;
	}

}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
	mtd->_lock = cfi_atmel_lock;
	mtd->_unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * These flashes report two separate eraseblock regions based on the
	 * sector_erase-size and block_erase-size, although they both operate on the
	 * same memory. This is not allowed according to CFI, so we just pick the
	 * sector_erase-size.
	 */
	cfi->cfiq->NumEraseRegions = 1;
}

static void fixup_sst39vf(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x5555;
	cfi->addr_unlock2 = 0x2AAA;
}

static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x555;
	cfi->addr_unlock2 = 0x2AA;

	cfi->sector_erase_cmd = CMD(0x50);
}

static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_sst39vf_rev_b(mtd);

	/*
	 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
	 * it should report a size of 8KBytes (0x0020*256).
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
	pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name);
	}
}

static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * S29NS512P flash uses more than 8bits to report number of sectors,
	 * which is not permitted by CFI.
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
	pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name);
}

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
	{ CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
	{ CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
	{ CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
	{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
	{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
	{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
	{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
	{ 0, 0, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common. It is likely that the device IDs are as
	 * well. This table picks all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
	{ 0, 0, NULL }
};


static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG) {
		if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
		    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
			/*
			 * Samsung K8P2815UQB and K8D6x16UxM chips
			 * report major=0 / minor=0.
			 * K8D3x16UxC chips report major=3 / minor=3.
			 */
			printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu"
			       " Extended Query version to 1.%c\n",
			       extp->MinorVersion);
			extp->MajorVersion = '1';
		}
	}

	/*
	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
	 */
	if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
		extp->MajorVersion = '1';
		extp->MinorVersion = '0';
	}
}

static int is_m29ew(struct cfi_private *cfi)
{
	if (cfi->mfr == CFI_MFR_INTEL &&
	    ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
	     (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
		return 1;
	return 0;
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
 * Some revisions of the M29EW suffer from erase suspend hang ups. In
 * particular, it can occur when the sequence
 * Erase Confirm -> Suspend -> Program -> Resume
 * causes a lockup due to internal timing issues. The consequence is that the
 * erase cannot be resumed without inserting a dummy command after programming
 * and prior to resuming. [...] The work-around is to issue a dummy write cycle
 * that writes an F0 command code before the RESUME command.
 */
static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
					  unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	/* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
	if (is_m29ew(cfi))
		map_write(map, CMD(0xF0), adr);
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
 *
 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
 * command is issued after an ERASE RESUME operation without waiting for a
 * minimum delay. The result is that once the ERASE seems to be completed
 * (no bits are toggling), the contents of the Flash memory block on which
 * the erase was ongoing could be inconsistent with the expected values
 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
 * values), causing a consequent failure of the ERASE operation.
 * The occurrence of this issue could be high, especially when file system
 * operations on the Flash are intensive. As a result, it is recommended
 * that a patch be applied. Intensive file system operations can cause many
 * calls to the garbage routine to free Flash space (also by erasing physical
 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
 * commands can occur. The problem disappears when a delay is inserted after
 * the RESUME command by using the udelay() function available in Linux.
 * The DELAY value must be tuned based on the customer's platform.
 * The maximum value that fixes the problem in all cases is 500us.
 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
 * in most cases.
 * We have chosen 500µs because this latency is acceptable.
 */
static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
{
	/*
	 * Resolving the Delay After Resume Issue, see Micron TN-13-07.
	 * Worst case delay must be 500µs but 30-50µs should be ok as well.
	 */
	if (is_m29ew(cfi))
		cfi_udelay(500);
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct device_node __maybe_unused *np = map->device_node;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return NULL;
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_erase = cfi_amdstd_erase_varsize;
	mtd->_write = cfi_amdstd_write_words;
	mtd->_read = cfi_amdstd_read;
	mtd->_sync = cfi_amdstd_sync;
	mtd->_suspend = cfi_amdstd_suspend;
	mtd->_resume = cfi_amdstd_resume;
	mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg;
	mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg;
	mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info;
	mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info;
	mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg;
	mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->name = map->name;
	mtd->writesize = 1;
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

	pr_debug("MTD %s(): write buffer size %d\n", __func__,
		 mtd->writebufsize);

	mtd->_panic_write = cfi_amdstd_panic_write;
	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			/*
			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
			 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
			 *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
			 *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
			 */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
				printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

#ifdef CONFIG_OF
			if (np && of_property_read_bool(
				    np, "use-advanced-sector-protection")
			    && extp->BlkProtUnprot == 8) {
				printk(KERN_INFO " Advanced Sector Protection (PPB Locking) supported\n");
				mtd->_lock = cfi_ppb_lock;
				mtd->_unlock = cfi_ppb_unlock;
				mtd->_is_locked = cfi_ppb_is_locked;
			}
#endif

			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions-1)-i;

					swap(cfi->cfiq->EraseRegionInfo[i],
					     cfi->cfiq->EraseRegionInfo[j]);
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		/*
		 * First calculate the maximum timeout from the timeout fields
		 * of struct cfi_ident probed from the chip's CFI area, if
		 * available. Specify a minimum of 2000us, in case the CFI data
		 * is wrong.
		 */
		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1 << (cfi->cfiq->BufWriteTimeoutTyp +
				      cfi->cfiq->BufWriteTimeoutMax);
		else
			cfi->chips[i].buffer_write_time_max = 0;

		cfi->chips[i].buffer_write_time_max =
			max(cfi->chips[i].buffer_write_time_max, 2000);

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
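
/*
 * Illustrative note (not part of the original sources): a map driver does
 * not normally call cfi_cmdset_0002() directly. It is reached through the
 * generic CFI probe, which reads the primary vendor command set ID (0x0002)
 * from the query table and dispatches here, roughly as follows:
 *
 *	struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map);
 *	if (mtd)
 *		mtd_device_register(mtd, NULL, 0);
 *
 * "my_map" stands in for the board-specific struct map_info.
 */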

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions)
		goto setup_err;

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 *
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		      (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we are trying
				 * to use the erase-in-progress sector. */
				put_chip(map, chip, adr);
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		cfi_fixup_m29ew_erase_suspend(map,
			chip->in_progress_block_addr);
		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
		cfi_fixup_m29ew_delay_after_resume(cfi);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}
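
/*
 * Illustrative sketch (not part of the original sources): the write and
 * erase paths below all bracket their command sequences with the chip
 * state machine above, roughly as follows:
 *
 *	mutex_lock(&chip->mutex);
 *	ret = get_chip(map, chip, adr, FL_WRITING);
 *	if (ret) {
 *		mutex_unlock(&chip->mutex);
 *		return ret;
 *	}
 *	... issue the unlock/command cycles and poll for completion ...
 *	put_chip(map, chip, adr);
 *	mutex_unlock(&chip->mutex);
 *
 * get_chip() may suspend an in-progress erase; put_chip() resumes it and
 * wakes any waiters.
 */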

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode. This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and pending
 * (but still masked) hardware interrupts. Whenever an interrupt is
 * pending, the flash erase operation is suspended, array mode restored
 * and interrupts unmasked. Task scheduling might also happen at that
 * point. The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once). Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back. However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state. If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Correct Erase Suspend Hangups for M29EW */
			cfi_fixup_m29ew_erase_suspend(map, adr);
			/* Resume the write or erase operation */
			map_write(map, cfi->sector_erase_cmd, adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when the waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway. We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit. For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with. This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
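
/*
 * Illustrative note (not part of the original sources): an MTD client never
 * calls cfi_amdstd_read() directly; it goes through the mtd_read() wrapper
 * on the registered device, which dispatches to mtd->_read, e.g.:
 *
 *	size_t retlen;
 *	u_char buf[64];
 *	int err = mtd_read(mtd, 0x1000, sizeof(buf), &retlen, buf);
 *
 * The offset 0x1000 and the buffer size are arbitrary example values.
 */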

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			loff_t adr, size_t len, u_char *buf, size_t grouplen);

static inline void otp_enter(struct map_info *map, struct flchip *chip,
			     loff_t adr, size_t len)
{
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}

static inline void otp_exit(struct map_info *map, struct flchip *chip,
			    loff_t adr, size_t len)
{
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}

static inline int do_read_secsi_onechip(struct map_info *map,
					struct flchip *chip, loff_t adr,
					size_t len, u_char *buf,
					size_t grouplen)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY){
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	otp_enter(map, chip, adr, len);
	map_copy_from(map, buf, adr, len);
	otp_exit(map, chip, adr, len);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	/* 8 secsi bytes per chip */
	chipnum=from>>3;
	ofs=from & 7;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs,
					    thislen, buf, 0);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum,
				     int mode);

static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
			size_t len, u_char *buf, size_t grouplen)
{
	int ret;
	while (len) {
		unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1);
		int gap = adr - bus_ofs;
		int n = min_t(int, len, map_bankwidth(map) - gap);
		map_word datum = map_word_ff(map);

		if (n != map_bankwidth(map)) {
			/* partial write of a word, load old contents */
			otp_enter(map, chip, bus_ofs, map_bankwidth(map));
			datum = map_read(map, bus_ofs);
			otp_exit(map, chip, bus_ofs, map_bankwidth(map));
		}

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		adr += n;
		buf += n;
		len -= n;
	}

	return 0;
}

static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
		       size_t len, u_char *buf, size_t grouplen)
{
	struct cfi_private *cfi = map->fldrv_priv;
	uint8_t lockreg;
	unsigned long timeo;
	int ret;

	/* make sure area matches group boundaries */
	if ((adr != 0) || (len != grouplen))
		return -EINVAL;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	chip->state = FL_LOCKING;

	/* Enter lock register command */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	/* read lock register */
	lockreg = cfi_read_query(map, 0);

	/* clear bit 0 to protect the extended memory block */
	lockreg &= ~0x01;

	/* write lock register */
	map_write(map, CMD(0xA0), chip->start);
	map_write(map, CMD(lockreg), chip->start);

	/* wait for chip to become ready */
	timeo = jiffies + msecs_to_jiffies(2);
	for (;;) {
		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			pr_err("Waiting for chip to be ready timed out.\n");
			ret = -EIO;
			break;
		}
		UDELAY(map, chip, 0, 1);
	}

	/* exit protection commands */
	map_write(map, CMD(0x90), chip->start);
	map_write(map, CMD(0x00), chip->start);

	chip->state = FL_READY;
	put_chip(map, chip, chip->start);
	mutex_unlock(&chip->mutex);

	return ret;
}

static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, u_char *buf,
			       otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ofs_factor = cfi->interleave * cfi->device_type;
	unsigned long base;
	int chipnum;
	struct flchip *chip;
	uint8_t otp, lockreg;
	int ret;

	size_t user_size, factory_size, otpsize;
	loff_t user_offset, factory_offset, otpoffset;
	int user_locked = 0, otplocked;

	*retlen = 0;

	for (chipnum = 0; chipnum < cfi->numchips; chipnum++) {
		chip = &cfi->chips[chipnum];
		factory_size = 0;
		user_size = 0;

		/* Micron M29EW family */
		if (is_m29ew(cfi)) {
			base = chip->start;

			/* check whether secsi area is factory locked
			   or user lockable */
			mutex_lock(&chip->mutex);
			ret = get_chip(map, chip, base, FL_CFI_QUERY);
			if (ret) {
				mutex_unlock(&chip->mutex);
				return ret;
			}
			cfi_qry_mode_on(base, map, cfi);
			otp = cfi_read_query(map, base + 0x3 * ofs_factor);
			cfi_qry_mode_off(base, map, cfi);
			put_chip(map, chip, base);
			mutex_unlock(&chip->mutex);

			if (otp & 0x80) {
				/* factory locked */
				factory_offset = 0;
				factory_size = 0x100;
			} else {
				/* customer lockable */
				user_offset = 0;
				user_size = 0x100;

				mutex_lock(&chip->mutex);
				ret = get_chip(map, chip, base, FL_LOCKING);
				if (ret) {
					mutex_unlock(&chip->mutex);
					return ret;
				}

				/* Enter lock register command */
				cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				cfi_send_gen_cmd(0x55, cfi->addr_unlock2,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				cfi_send_gen_cmd(0x40, cfi->addr_unlock1,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				/* read lock register */
				lockreg = cfi_read_query(map, 0);
				/* exit protection commands */
				map_write(map, CMD(0x90), chip->start);
				map_write(map, CMD(0x00), chip->start);
				put_chip(map, chip, chip->start);
				mutex_unlock(&chip->mutex);

				user_locked = ((lockreg & 0x01) == 0x00);
			}
		}

		otpsize = user_regs ? user_size : factory_size;
		if (!otpsize)
			continue;
		otpoffset = user_regs ? user_offset : factory_offset;
		otplocked = user_regs ? user_locked : 1;

		if (!action) {
			/* return otpinfo */
			struct otp_info *otpinfo;
			len -= sizeof(*otpinfo);
			if (len <= 0)
				return -ENOSPC;
			otpinfo = (struct otp_info *)buf;
			otpinfo->start = from;
			otpinfo->length = otpsize;
			otpinfo->locked = otplocked;
			buf += sizeof(*otpinfo);
			*retlen += sizeof(*otpinfo);
			from += otpsize;
		} else if ((from < otpsize) && (len > 0)) {
			size_t size;
			size = (len < otpsize - from) ? len : otpsize - from;
			ret = action(map, chip, otpoffset + from, size, buf,
				     otpsize);
			if (ret < 0)
				return ret;

			buf += size;
			len -= size;
			*retlen += size;
			from = 0;
		} else {
			from -= otpsize;
		}
	}
	return 0;
}

static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len,
					 size_t *retlen, struct otp_info *buf)
{
	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				   NULL, 0);
}

static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len,
					 size_t *retlen, struct otp_info *buf)
{
	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				   NULL, 1);
}

static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					 size_t len, size_t *retlen,
					 u_char *buf)
{
	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
				   buf, do_read_secsi_onechip, 0);
}

static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					 size_t len, size_t *retlen,
					 u_char *buf)
{
	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
				   buf, do_read_secsi_onechip, 1);
}

static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					  size_t len, size_t *retlen,
					  u_char *buf)
{
	return cfi_amdstd_otp_walk(mtd, from, len, retlen, buf,
				   do_otp_write, 1);
}

static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
					 size_t len)
{
	size_t retlen;
	return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL,
				   do_otp_lock, 1);
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum,
				     int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundreds usec). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead. Unfortunately, struct flchip doesn't have a field for the
	 * maximum timeout, only for the typical one, which can be far too
	 * short depending on the conditions. The ' + 1' is to avoid having a
	 * timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
		 __func__, adr, datum.x[0]);

	if (mode == FL_OTP_WRITE)
		otp_enter(map, chip, adr, map_bankwidth(map));

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		pr_debug("MTD %s(): NOP\n",
			 __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = mode;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != mode) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	if (mode == FL_OTP_WRITE)
		otp_exit(map, chip, adr, map_bankwidth(map));
	chip->state = FL_READY;
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}


static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf, FL_WRITING);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * Timeout is calculated according to CFI data, if available.
	 * See more comments in cfi_cmdset_0002().
1799 */ 1800 unsigned long uWriteTimeout = 1801 usecs_to_jiffies(chip->buffer_write_time_max); 1802 int ret = -EIO; 1803 unsigned long cmd_adr; 1804 int z, words; 1805 map_word datum; 1806 1807 adr += chip->start; 1808 cmd_adr = adr; 1809 1810 mutex_lock(&chip->mutex); 1811 ret = get_chip(map, chip, adr, FL_WRITING); 1812 if (ret) { 1813 mutex_unlock(&chip->mutex); 1814 return ret; 1815 } 1816 1817 datum = map_word_load(map, buf); 1818 1819 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 1820 __func__, adr, datum.x[0] ); 1821 1822 XIP_INVAL_CACHED_RANGE(map, adr, len); 1823 ENABLE_VPP(map); 1824 xip_disable(map, chip, cmd_adr); 1825 1826 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1827 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1828 1829 /* Write Buffer Load */ 1830 map_write(map, CMD(0x25), cmd_adr); 1831 1832 chip->state = FL_WRITING_TO_BUFFER; 1833 1834 /* Write length of data to come */ 1835 words = len / map_bankwidth(map); 1836 map_write(map, CMD(words - 1), cmd_adr); 1837 /* Write data */ 1838 z = 0; 1839 while(z < words * map_bankwidth(map)) { 1840 datum = map_word_load(map, buf); 1841 map_write(map, datum, adr + z); 1842 1843 z += map_bankwidth(map); 1844 buf += map_bankwidth(map); 1845 } 1846 z -= map_bankwidth(map); 1847 1848 adr += z; 1849 1850 /* Write Buffer Program Confirm: GO GO GO */ 1851 map_write(map, CMD(0x29), cmd_adr); 1852 chip->state = FL_WRITING; 1853 1854 INVALIDATE_CACHE_UDELAY(map, chip, 1855 adr, map_bankwidth(map), 1856 chip->word_write_time); 1857 1858 timeo = jiffies + uWriteTimeout; 1859 1860 for (;;) { 1861 if (chip->state != FL_WRITING) { 1862 /* Someone's suspended the write. Sleep */ 1863 DECLARE_WAITQUEUE(wait, current); 1864 1865 set_current_state(TASK_UNINTERRUPTIBLE); 1866 add_wait_queue(&chip->wq, &wait); 1867 mutex_unlock(&chip->mutex); 1868 schedule(); 1869 remove_wait_queue(&chip->wq, &wait); 1870 timeo = jiffies + (HZ / 2); /* FIXME */ 1871 mutex_lock(&chip->mutex); 1872 continue; 1873 } 1874 1875 if (time_after(jiffies, timeo) && !chip_ready(map, adr)) 1876 break; 1877 1878 if (chip_ready(map, adr)) { 1879 xip_enable(map, chip, adr); 1880 goto op_done; 1881 } 1882 1883 /* Latency issues. Drop the lock, wait a while and retry */ 1884 UDELAY(map, chip, adr, 1); 1885 } 1886 1887 /* 1888 * Recovery from write-buffer programming failures requires 1889 * the write-to-buffer-reset sequence. Since the last part 1890 * of the sequence also works as a normal reset, we can run 1891 * the same commands regardless of why we are here. 1892 * See e.g. 
1893 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf 1894 */ 1895 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1896 cfi->device_type, NULL); 1897 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 1898 cfi->device_type, NULL); 1899 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi, 1900 cfi->device_type, NULL); 1901 xip_enable(map, chip, adr); 1902 /* FIXME - should have reset delay before continuing */ 1903 1904 printk(KERN_WARNING "MTD %s(): software timeout, address:0x%.8lx.\n", 1905 __func__, adr); 1906 1907 ret = -EIO; 1908 op_done: 1909 chip->state = FL_READY; 1910 DISABLE_VPP(map); 1911 put_chip(map, chip, adr); 1912 mutex_unlock(&chip->mutex); 1913 1914 return ret; 1915 } 1916 1917 1918 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, 1919 size_t *retlen, const u_char *buf) 1920 { 1921 struct map_info *map = mtd->priv; 1922 struct cfi_private *cfi = map->fldrv_priv; 1923 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 1924 int ret = 0; 1925 int chipnum; 1926 unsigned long ofs; 1927 1928 chipnum = to >> cfi->chipshift; 1929 ofs = to - (chipnum << cfi->chipshift); 1930 1931 /* If it's not bus-aligned, do the first word write */ 1932 if (ofs & (map_bankwidth(map)-1)) { 1933 size_t local_len = (-ofs)&(map_bankwidth(map)-1); 1934 if (local_len > len) 1935 local_len = len; 1936 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), 1937 local_len, retlen, buf); 1938 if (ret) 1939 return ret; 1940 ofs += local_len; 1941 buf += local_len; 1942 len -= local_len; 1943 1944 if (ofs >> cfi->chipshift) { 1945 chipnum ++; 1946 ofs = 0; 1947 if (chipnum == cfi->numchips) 1948 return 0; 1949 } 1950 } 1951 1952 /* Write buffer is worth it only if more than one word to write... */ 1953 while (len >= map_bankwidth(map) * 2) { 1954 /* We must not cross write block boundaries */ 1955 int size = wbufsize - (ofs & (wbufsize-1)); 1956 1957 if (size > len) 1958 size = len; 1959 if (size % map_bankwidth(map)) 1960 size -= size % map_bankwidth(map); 1961 1962 ret = do_write_buffer(map, &cfi->chips[chipnum], 1963 ofs, buf, size); 1964 if (ret) 1965 return ret; 1966 1967 ofs += size; 1968 buf += size; 1969 (*retlen) += size; 1970 len -= size; 1971 1972 if (ofs >> cfi->chipshift) { 1973 chipnum ++; 1974 ofs = 0; 1975 if (chipnum == cfi->numchips) 1976 return 0; 1977 } 1978 } 1979 1980 if (len) { 1981 size_t retlen_dregs = 0; 1982 1983 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), 1984 len, &retlen_dregs, buf); 1985 1986 *retlen += retlen_dregs; 1987 return ret; 1988 } 1989 1990 return 0; 1991 } 1992 1993 /* 1994 * Wait for the flash chip to become ready to write data 1995 * 1996 * This is only called during the panic_write() path. When panic_write() 1997 * is called, the kernel is in the process of a panic, and will soon be 1998 * dead. Therefore we don't take any locks, and attempt to get access 1999 * to the chip as soon as possible. 2000 */ 2001 static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip, 2002 unsigned long adr) 2003 { 2004 struct cfi_private *cfi = map->fldrv_priv; 2005 int retries = 10; 2006 int i; 2007 2008 /* 2009 * If the driver thinks the chip is idle, and no toggle bits 2010 * are changing, then the chip is actually idle for sure. 
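 * (chip_ready() reads the same location twice and compares the results:
 *  while a program or erase is in progress, status bits such as DQ6
 *  toggle between consecutive reads, so two identical reads mean the
 *  device is back in read-array mode.)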
2011 */ 2012 if (chip->state == FL_READY && chip_ready(map, adr)) 2013 return 0; 2014 2015 /* 2016 * Try several times to reset the chip and then wait for it 2017 * to become idle. The upper limit of a few milliseconds of 2018 * delay isn't a big problem: the kernel is dying anyway. It 2019 * is more important to save the messages. 2020 */ 2021 while (retries > 0) { 2022 const unsigned long timeo = (HZ / 1000) + 1; 2023 2024 /* send the reset command */ 2025 map_write(map, CMD(0xF0), chip->start); 2026 2027 /* wait for the chip to become ready */ 2028 for (i = 0; i < jiffies_to_usecs(timeo); i++) { 2029 if (chip_ready(map, adr)) 2030 return 0; 2031 2032 udelay(1); 2033 } 2034 2035 retries--; 2036 } 2037 2038 /* the chip never became ready */ 2039 return -EBUSY; 2040 } 2041 2042 /* 2043 * Write out one word of data to a single flash chip during a kernel panic 2044 * 2045 * This is only called during the panic_write() path. When panic_write() 2046 * is called, the kernel is in the process of a panic, and will soon be 2047 * dead. Therefore we don't take any locks, and attempt to get access 2048 * to the chip as soon as possible. 2049 * 2050 * The implementation of this routine is intentionally similar to 2051 * do_write_oneword(), in order to ease code maintenance. 2052 */ 2053 static int do_panic_write_oneword(struct map_info *map, struct flchip *chip, 2054 unsigned long adr, map_word datum) 2055 { 2056 const unsigned long uWriteTimeout = (HZ / 1000) + 1; 2057 struct cfi_private *cfi = map->fldrv_priv; 2058 int retry_cnt = 0; 2059 map_word oldd; 2060 int ret = 0; 2061 int i; 2062 2063 adr += chip->start; 2064 2065 ret = cfi_amdstd_panic_wait(map, chip, adr); 2066 if (ret) 2067 return ret; 2068 2069 pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n", 2070 __func__, adr, datum.x[0]); 2071 2072 /* 2073 * Check for a NOP for the case when the datum to write is already 2074 * present - it saves time and works around buggy chips that corrupt 2075 * data at other locations when 0xff is written to a location that 2076 * already contains 0xff. 2077 */ 2078 oldd = map_read(map, adr); 2079 if (map_word_equal(map, oldd, datum)) { 2080 pr_debug("MTD %s(): NOP\n", __func__); 2081 goto op_done; 2082 } 2083 2084 ENABLE_VPP(map); 2085 2086 retry: 2087 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2088 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2089 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2090 map_write(map, datum, adr); 2091 2092 for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) { 2093 if (chip_ready(map, adr)) 2094 break; 2095 2096 udelay(1); 2097 } 2098 2099 if (!chip_good(map, adr, datum)) { 2100 /* reset on all failures. */ 2101 map_write(map, CMD(0xF0), chip->start); 2102 /* FIXME - should have reset delay before continuing */ 2103 2104 if (++retry_cnt <= MAX_WORD_RETRIES) 2105 goto retry; 2106 2107 ret = -EIO; 2108 } 2109 2110 op_done: 2111 DISABLE_VPP(map); 2112 return ret; 2113 } 2114 2115 /* 2116 * Write out some data during a kernel panic 2117 * 2118 * This is used by the mtdoops driver to save the dying messages from a 2119 * kernel which has panic'd. 2120 * 2121 * This routine ignores all of the locking used throughout the rest of the 2122 * driver, in order to ensure that the data gets written out no matter what 2123 * state this driver (and the flash chip itself) was in when the kernel crashed. 
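 * (This routine is installed as the MTD panic_write handler when this
 *  command set is set up, so mtdoops reaches it via mtd_panic_write()
 *  rather than through the normal, lock-taking write path.)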
2124 * 2125 * The implementation of this routine is intentionally similar to 2126 * cfi_amdstd_write_words(), in order to ease code maintenance. 2127 */ 2128 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, 2129 size_t *retlen, const u_char *buf) 2130 { 2131 struct map_info *map = mtd->priv; 2132 struct cfi_private *cfi = map->fldrv_priv; 2133 unsigned long ofs, chipstart; 2134 int ret = 0; 2135 int chipnum; 2136 2137 chipnum = to >> cfi->chipshift; 2138 ofs = to - (chipnum << cfi->chipshift); 2139 chipstart = cfi->chips[chipnum].start; 2140 2141 /* If it's not bus aligned, do the first byte write */ 2142 if (ofs & (map_bankwidth(map) - 1)) { 2143 unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1); 2144 int i = ofs - bus_ofs; 2145 int n = 0; 2146 map_word tmp_buf; 2147 2148 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs); 2149 if (ret) 2150 return ret; 2151 2152 /* Load 'tmp_buf' with old contents of flash */ 2153 tmp_buf = map_read(map, bus_ofs + chipstart); 2154 2155 /* Number of bytes to copy from buffer */ 2156 n = min_t(int, len, map_bankwidth(map) - i); 2157 2158 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); 2159 2160 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 2161 bus_ofs, tmp_buf); 2162 if (ret) 2163 return ret; 2164 2165 ofs += n; 2166 buf += n; 2167 (*retlen) += n; 2168 len -= n; 2169 2170 if (ofs >> cfi->chipshift) { 2171 chipnum++; 2172 ofs = 0; 2173 if (chipnum == cfi->numchips) 2174 return 0; 2175 } 2176 } 2177 2178 /* We are now aligned, write as much as possible */ 2179 while (len >= map_bankwidth(map)) { 2180 map_word datum; 2181 2182 datum = map_word_load(map, buf); 2183 2184 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 2185 ofs, datum); 2186 if (ret) 2187 return ret; 2188 2189 ofs += map_bankwidth(map); 2190 buf += map_bankwidth(map); 2191 (*retlen) += map_bankwidth(map); 2192 len -= map_bankwidth(map); 2193 2194 if (ofs >> cfi->chipshift) { 2195 chipnum++; 2196 ofs = 0; 2197 if (chipnum == cfi->numchips) 2198 return 0; 2199 2200 chipstart = cfi->chips[chipnum].start; 2201 } 2202 } 2203 2204 /* Write the trailing bytes if any */ 2205 if (len & (map_bankwidth(map) - 1)) { 2206 map_word tmp_buf; 2207 2208 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs); 2209 if (ret) 2210 return ret; 2211 2212 tmp_buf = map_read(map, ofs + chipstart); 2213 2214 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); 2215 2216 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 2217 ofs, tmp_buf); 2218 if (ret) 2219 return ret; 2220 2221 (*retlen) += len; 2222 } 2223 2224 return 0; 2225 } 2226 2227 2228 /* 2229 * Handle devices with one erase region, that only implement 2230 * the chip erase command. 
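 * (The sequence below is the standard six-cycle chip erase: two unlock
 *  cycles (AA/55), erase setup (80), two more unlock cycles, then 10 at
 *  the first unlock address to start the embedded chip-erase algorithm.)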
2231 */ 2232 static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) 2233 { 2234 struct cfi_private *cfi = map->fldrv_priv; 2235 unsigned long timeo = jiffies + HZ; 2236 unsigned long int adr; 2237 DECLARE_WAITQUEUE(wait, current); 2238 int ret = 0; 2239 2240 adr = cfi->addr_unlock1; 2241 2242 mutex_lock(&chip->mutex); 2243 ret = get_chip(map, chip, adr, FL_WRITING); 2244 if (ret) { 2245 mutex_unlock(&chip->mutex); 2246 return ret; 2247 } 2248 2249 pr_debug("MTD %s(): ERASE 0x%.8lx\n", 2250 __func__, chip->start ); 2251 2252 XIP_INVAL_CACHED_RANGE(map, adr, map->size); 2253 ENABLE_VPP(map); 2254 xip_disable(map, chip, adr); 2255 2256 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2257 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2258 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2259 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2260 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2261 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2262 2263 chip->state = FL_ERASING; 2264 chip->erase_suspended = 0; 2265 chip->in_progress_block_addr = adr; 2266 2267 INVALIDATE_CACHE_UDELAY(map, chip, 2268 adr, map->size, 2269 chip->erase_time*500); 2270 2271 timeo = jiffies + (HZ*20); 2272 2273 for (;;) { 2274 if (chip->state != FL_ERASING) { 2275 /* Someone's suspended the erase. Sleep */ 2276 set_current_state(TASK_UNINTERRUPTIBLE); 2277 add_wait_queue(&chip->wq, &wait); 2278 mutex_unlock(&chip->mutex); 2279 schedule(); 2280 remove_wait_queue(&chip->wq, &wait); 2281 mutex_lock(&chip->mutex); 2282 continue; 2283 } 2284 if (chip->erase_suspended) { 2285 /* This erase was suspended and resumed. 2286 Adjust the timeout */ 2287 timeo = jiffies + (HZ*20); /* FIXME */ 2288 chip->erase_suspended = 0; 2289 } 2290 2291 if (chip_ready(map, adr)) 2292 break; 2293 2294 if (time_after(jiffies, timeo)) { 2295 printk(KERN_WARNING "MTD %s(): software timeout\n", 2296 __func__ ); 2297 break; 2298 } 2299 2300 /* Latency issues. Drop the lock, wait a while and retry */ 2301 UDELAY(map, chip, adr, 1000000/HZ); 2302 } 2303 /* Did we succeed? */ 2304 if (!chip_good(map, adr, map_word_ff(map))) { 2305 /* reset on all failures. 
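 (the 0xF0 reset command returns the device to read-array mode)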
*/ 2306 map_write( map, CMD(0xF0), chip->start ); 2307 /* FIXME - should have reset delay before continuing */ 2308 2309 ret = -EIO; 2310 } 2311 2312 chip->state = FL_READY; 2313 xip_enable(map, chip, adr); 2314 DISABLE_VPP(map); 2315 put_chip(map, chip, adr); 2316 mutex_unlock(&chip->mutex); 2317 2318 return ret; 2319 } 2320 2321 2322 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk) 2323 { 2324 struct cfi_private *cfi = map->fldrv_priv; 2325 unsigned long timeo = jiffies + HZ; 2326 DECLARE_WAITQUEUE(wait, current); 2327 int ret = 0; 2328 2329 adr += chip->start; 2330 2331 mutex_lock(&chip->mutex); 2332 ret = get_chip(map, chip, adr, FL_ERASING); 2333 if (ret) { 2334 mutex_unlock(&chip->mutex); 2335 return ret; 2336 } 2337 2338 pr_debug("MTD %s(): ERASE 0x%.8lx\n", 2339 __func__, adr ); 2340 2341 XIP_INVAL_CACHED_RANGE(map, adr, len); 2342 ENABLE_VPP(map); 2343 xip_disable(map, chip, adr); 2344 2345 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2346 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2347 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2348 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2349 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2350 map_write(map, cfi->sector_erase_cmd, adr); 2351 2352 chip->state = FL_ERASING; 2353 chip->erase_suspended = 0; 2354 chip->in_progress_block_addr = adr; 2355 2356 INVALIDATE_CACHE_UDELAY(map, chip, 2357 adr, len, 2358 chip->erase_time*500); 2359 2360 timeo = jiffies + (HZ*20); 2361 2362 for (;;) { 2363 if (chip->state != FL_ERASING) { 2364 /* Someone's suspended the erase. Sleep */ 2365 set_current_state(TASK_UNINTERRUPTIBLE); 2366 add_wait_queue(&chip->wq, &wait); 2367 mutex_unlock(&chip->mutex); 2368 schedule(); 2369 remove_wait_queue(&chip->wq, &wait); 2370 mutex_lock(&chip->mutex); 2371 continue; 2372 } 2373 if (chip->erase_suspended) { 2374 /* This erase was suspended and resumed. 2375 Adjust the timeout */ 2376 timeo = jiffies + (HZ*20); /* FIXME */ 2377 chip->erase_suspended = 0; 2378 } 2379 2380 if (chip_ready(map, adr)) { 2381 xip_enable(map, chip, adr); 2382 break; 2383 } 2384 2385 if (time_after(jiffies, timeo)) { 2386 xip_enable(map, chip, adr); 2387 printk(KERN_WARNING "MTD %s(): software timeout\n", 2388 __func__ ); 2389 break; 2390 } 2391 2392 /* Latency issues. Drop the lock, wait a while and retry */ 2393 UDELAY(map, chip, adr, 1000000/HZ); 2394 } 2395 /* Did we succeed? */ 2396 if (!chip_good(map, adr, map_word_ff(map))) { 2397 /* reset on all failures. 
*/ 2398 map_write( map, CMD(0xF0), chip->start ); 2399 /* FIXME - should have reset delay before continuing */ 2400 2401 ret = -EIO; 2402 } 2403 2404 chip->state = FL_READY; 2405 DISABLE_VPP(map); 2406 put_chip(map, chip, adr); 2407 mutex_unlock(&chip->mutex); 2408 return ret; 2409 } 2410 2411 2412 static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) 2413 { 2414 unsigned long ofs, len; 2415 int ret; 2416 2417 ofs = instr->addr; 2418 len = instr->len; 2419 2420 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL); 2421 if (ret) 2422 return ret; 2423 2424 instr->state = MTD_ERASE_DONE; 2425 mtd_erase_callback(instr); 2426 2427 return 0; 2428 } 2429 2430 2431 static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr) 2432 { 2433 struct map_info *map = mtd->priv; 2434 struct cfi_private *cfi = map->fldrv_priv; 2435 int ret = 0; 2436 2437 if (instr->addr != 0) 2438 return -EINVAL; 2439 2440 if (instr->len != mtd->size) 2441 return -EINVAL; 2442 2443 ret = do_erase_chip(map, &cfi->chips[0]); 2444 if (ret) 2445 return ret; 2446 2447 instr->state = MTD_ERASE_DONE; 2448 mtd_erase_callback(instr); 2449 2450 return 0; 2451 } 2452 2453 static int do_atmel_lock(struct map_info *map, struct flchip *chip, 2454 unsigned long adr, int len, void *thunk) 2455 { 2456 struct cfi_private *cfi = map->fldrv_priv; 2457 int ret; 2458 2459 mutex_lock(&chip->mutex); 2460 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); 2461 if (ret) 2462 goto out_unlock; 2463 chip->state = FL_LOCKING; 2464 2465 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); 2466 2467 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2468 cfi->device_type, NULL); 2469 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2470 cfi->device_type, NULL); 2471 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, 2472 cfi->device_type, NULL); 2473 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2474 cfi->device_type, NULL); 2475 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2476 cfi->device_type, NULL); 2477 map_write(map, CMD(0x40), chip->start + adr); 2478 2479 chip->state = FL_READY; 2480 put_chip(map, chip, adr + chip->start); 2481 ret = 0; 2482 2483 out_unlock: 2484 mutex_unlock(&chip->mutex); 2485 return ret; 2486 } 2487 2488 static int do_atmel_unlock(struct map_info *map, struct flchip *chip, 2489 unsigned long adr, int len, void *thunk) 2490 { 2491 struct cfi_private *cfi = map->fldrv_priv; 2492 int ret; 2493 2494 mutex_lock(&chip->mutex); 2495 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING); 2496 if (ret) 2497 goto out_unlock; 2498 chip->state = FL_UNLOCKING; 2499 2500 pr_debug("MTD %s(): UNLOCK 0x%08lx len %d\n", __func__, adr, len); 2501 2502 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2503 cfi->device_type, NULL); 2504 map_write(map, CMD(0x70), adr); 2505 2506 chip->state = FL_READY; 2507 put_chip(map, chip, adr + chip->start); 2508 ret = 0; 2509 2510 out_unlock: 2511 mutex_unlock(&chip->mutex); 2512 return ret; 2513 } 2514 2515 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 2516 { 2517 return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL); 2518 } 2519 2520 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 2521 { 2522 return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL); 2523 } 2524 2525 /* 2526 * Advanced Sector Protection - PPB (Persistent Protection Bit) locking 2527 */ 2528 2529
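/*
 * Bookkeeping for cfi_ppb_unlock(): the PPB unlock command clears the
 * protection bit of every sector on the chip, so the lock state of each
 * sector is sampled first and any sector that was locked outside the
 * requested range is re-locked afterwards.  One ppb_lock entry is
 * recorded per sector, up to MAX_SECTORS.
 */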
struct ppb_lock { 2530 struct flchip *chip; 2531 loff_t offset; 2532 int locked; 2533 }; 2534 2535 #define MAX_SECTORS 512 2536 2537 #define DO_XXLOCK_ONEBLOCK_LOCK ((void *)1) 2538 #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2) 2539 #define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3) 2540 2541 static int __maybe_unused do_ppb_xxlock(struct map_info *map, 2542 struct flchip *chip, 2543 unsigned long adr, int len, void *thunk) 2544 { 2545 struct cfi_private *cfi = map->fldrv_priv; 2546 unsigned long timeo; 2547 int ret; 2548 2549 mutex_lock(&chip->mutex); 2550 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); 2551 if (ret) { 2552 mutex_unlock(&chip->mutex); 2553 return ret; 2554 } 2555 2556 pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len); 2557 2558 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2559 cfi->device_type, NULL); 2560 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2561 cfi->device_type, NULL); 2562 /* PPB entry command */ 2563 cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi, 2564 cfi->device_type, NULL); 2565 2566 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) { 2567 chip->state = FL_LOCKING; 2568 map_write(map, CMD(0xA0), chip->start + adr); 2569 map_write(map, CMD(0x00), chip->start + adr); 2570 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) { 2571 /* 2572 * Unlocking of one specific sector is not supported, so we 2573 * have to unlock all sectors of this device instead 2574 */ 2575 chip->state = FL_UNLOCKING; 2576 map_write(map, CMD(0x80), chip->start); 2577 map_write(map, CMD(0x30), chip->start); 2578 } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) { 2579 chip->state = FL_JEDEC_QUERY; 2580 /* Return locked status: 0->locked, 1->unlocked */ 2581 ret = !cfi_read_query(map, adr); 2582 } else 2583 BUG(); 2584 2585 /* 2586 * Wait for some time as unlocking of all sectors takes quite long 2587 */ 2588 timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */ 2589 for (;;) { 2590 if (chip_ready(map, adr)) 2591 break; 2592 2593 if (time_after(jiffies, timeo)) { 2594 printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); 2595 ret = -EIO; 2596 break; 2597 } 2598 2599 UDELAY(map, chip, adr, 1); 2600 } 2601 2602 /* Exit BC commands */ 2603 map_write(map, CMD(0x90), chip->start); 2604 map_write(map, CMD(0x00), chip->start); 2605 2606 chip->state = FL_READY; 2607 put_chip(map, chip, adr + chip->start); 2608 mutex_unlock(&chip->mutex); 2609 2610 return ret; 2611 } 2612 2613 static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, 2614 uint64_t len) 2615 { 2616 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, 2617 DO_XXLOCK_ONEBLOCK_LOCK); 2618 } 2619 2620 static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, 2621 uint64_t len) 2622 { 2623 struct mtd_erase_region_info *regions = mtd->eraseregions; 2624 struct map_info *map = mtd->priv; 2625 struct cfi_private *cfi = map->fldrv_priv; 2626 struct ppb_lock *sect; 2627 unsigned long adr; 2628 loff_t offset; 2629 uint64_t length; 2630 int chipnum; 2631 int i; 2632 int sectors; 2633 int ret; 2634 2635 /* 2636 * PPB unlocking always unlocks all sectors of the flash chip. 2637 * We need to re-lock all previously locked sectors. So lets 2638 * first check the locking status of all sectors and save 2639 * it for future use. 
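 * (do_ppb_xxlock() with DO_XXLOCK_ONEBLOCK_GETLOCK returns nonzero for a
 *  locked sector; that value is stored in sect[].locked so the sector can
 *  be re-locked once the chip-wide unlock has completed.)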
2640 */ 2641 sect = kzalloc(MAX_SECTORS * sizeof(struct ppb_lock), GFP_KERNEL); 2642 if (!sect) 2643 return -ENOMEM; 2644 2645 /* 2646 * This code to walk all sectors is a slightly modified version 2647 * of the cfi_varsize_frob() code. 2648 */ 2649 i = 0; 2650 chipnum = 0; 2651 adr = 0; 2652 sectors = 0; 2653 offset = 0; 2654 length = mtd->size; 2655 2656 while (length) { 2657 int size = regions[i].erasesize; 2658 2659 /* 2660 * Only test sectors that shall not be unlocked. The other 2661 * sectors shall be unlocked, so lets keep their locking 2662 * status at "unlocked" (locked=0) for the final re-locking. 2663 */ 2664 if ((adr < ofs) || (adr >= (ofs + len))) { 2665 sect[sectors].chip = &cfi->chips[chipnum]; 2666 sect[sectors].offset = offset; 2667 sect[sectors].locked = do_ppb_xxlock( 2668 map, &cfi->chips[chipnum], adr, 0, 2669 DO_XXLOCK_ONEBLOCK_GETLOCK); 2670 } 2671 2672 adr += size; 2673 offset += size; 2674 length -= size; 2675 2676 if (offset == regions[i].offset + size * regions[i].numblocks) 2677 i++; 2678 2679 if (adr >> cfi->chipshift) { 2680 adr = 0; 2681 chipnum++; 2682 2683 if (chipnum >= cfi->numchips) 2684 break; 2685 } 2686 2687 sectors++; 2688 if (sectors >= MAX_SECTORS) { 2689 printk(KERN_ERR "Only %d sectors for PPB locking supported!\n", 2690 MAX_SECTORS); 2691 kfree(sect); 2692 return -EINVAL; 2693 } 2694 } 2695 2696 /* Now unlock the whole chip */ 2697 ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, 2698 DO_XXLOCK_ONEBLOCK_UNLOCK); 2699 if (ret) { 2700 kfree(sect); 2701 return ret; 2702 } 2703 2704 /* 2705 * PPB unlocking always unlocks all sectors of the flash chip. 2706 * We need to re-lock all previously locked sectors. 2707 */ 2708 for (i = 0; i < sectors; i++) { 2709 if (sect[i].locked) 2710 do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0, 2711 DO_XXLOCK_ONEBLOCK_LOCK); 2712 } 2713 2714 kfree(sect); 2715 return ret; 2716 } 2717 2718 static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, 2719 uint64_t len) 2720 { 2721 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, 2722 DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0; 2723 } 2724 2725 static void cfi_amdstd_sync (struct mtd_info *mtd) 2726 { 2727 struct map_info *map = mtd->priv; 2728 struct cfi_private *cfi = map->fldrv_priv; 2729 int i; 2730 struct flchip *chip; 2731 int ret = 0; 2732 DECLARE_WAITQUEUE(wait, current); 2733 2734 for (i=0; !ret && i<cfi->numchips; i++) { 2735 chip = &cfi->chips[i]; 2736 2737 retry: 2738 mutex_lock(&chip->mutex); 2739 2740 switch(chip->state) { 2741 case FL_READY: 2742 case FL_STATUS: 2743 case FL_CFI_QUERY: 2744 case FL_JEDEC_QUERY: 2745 chip->oldstate = chip->state; 2746 chip->state = FL_SYNCING; 2747 /* No need to wake_up() on this state change - 2748 * as the whole point is that nobody can do anything 2749 * with the chip now anyway. 
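 * Deliberate fall through to the FL_SYNCING case below.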
2750 */ 2751 case FL_SYNCING: 2752 mutex_unlock(&chip->mutex); 2753 break; 2754 2755 default: 2756 /* Not an idle state */ 2757 set_current_state(TASK_UNINTERRUPTIBLE); 2758 add_wait_queue(&chip->wq, &wait); 2759 2760 mutex_unlock(&chip->mutex); 2761 2762 schedule(); 2763 2764 remove_wait_queue(&chip->wq, &wait); 2765 2766 goto retry; 2767 } 2768 } 2769 2770 /* Unlock the chips again */ 2771 2772 for (i--; i >=0; i--) { 2773 chip = &cfi->chips[i]; 2774 2775 mutex_lock(&chip->mutex); 2776 2777 if (chip->state == FL_SYNCING) { 2778 chip->state = chip->oldstate; 2779 wake_up(&chip->wq); 2780 } 2781 mutex_unlock(&chip->mutex); 2782 } 2783 } 2784 2785 2786 static int cfi_amdstd_suspend(struct mtd_info *mtd) 2787 { 2788 struct map_info *map = mtd->priv; 2789 struct cfi_private *cfi = map->fldrv_priv; 2790 int i; 2791 struct flchip *chip; 2792 int ret = 0; 2793 2794 for (i=0; !ret && i<cfi->numchips; i++) { 2795 chip = &cfi->chips[i]; 2796 2797 mutex_lock(&chip->mutex); 2798 2799 switch(chip->state) { 2800 case FL_READY: 2801 case FL_STATUS: 2802 case FL_CFI_QUERY: 2803 case FL_JEDEC_QUERY: 2804 chip->oldstate = chip->state; 2805 chip->state = FL_PM_SUSPENDED; 2806 /* No need to wake_up() on this state change - 2807 * as the whole point is that nobody can do anything 2808 * with the chip now anyway. 2809 */ 2810 case FL_PM_SUSPENDED: 2811 break; 2812 2813 default: 2814 ret = -EAGAIN; 2815 break; 2816 } 2817 mutex_unlock(&chip->mutex); 2818 } 2819 2820 /* Unlock the chips again */ 2821 2822 if (ret) { 2823 for (i--; i >=0; i--) { 2824 chip = &cfi->chips[i]; 2825 2826 mutex_lock(&chip->mutex); 2827 2828 if (chip->state == FL_PM_SUSPENDED) { 2829 chip->state = chip->oldstate; 2830 wake_up(&chip->wq); 2831 } 2832 mutex_unlock(&chip->mutex); 2833 } 2834 } 2835 2836 return ret; 2837 } 2838 2839 2840 static void cfi_amdstd_resume(struct mtd_info *mtd) 2841 { 2842 struct map_info *map = mtd->priv; 2843 struct cfi_private *cfi = map->fldrv_priv; 2844 int i; 2845 struct flchip *chip; 2846 2847 for (i=0; i<cfi->numchips; i++) { 2848 2849 chip = &cfi->chips[i]; 2850 2851 mutex_lock(&chip->mutex); 2852 2853 if (chip->state == FL_PM_SUSPENDED) { 2854 chip->state = FL_READY; 2855 map_write(map, CMD(0xF0), chip->start); 2856 wake_up(&chip->wq); 2857 } 2858 else 2859 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n"); 2860 2861 mutex_unlock(&chip->mutex); 2862 } 2863 } 2864 2865 2866 /* 2867 * Ensure that the flash device is put back into read array mode before 2868 * unloading the driver or rebooting. On some systems, rebooting while 2869 * the flash is in query/program/erase mode will prevent the CPU from 2870 * fetching the bootloader code, requiring a hard reset or power cycle. 
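 * (cfi_amdstd_reset() below issues that reset; it is called both from the
 *  reboot notifier and from cfi_amdstd_destroy().)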
2871 */ 2872 static int cfi_amdstd_reset(struct mtd_info *mtd) 2873 { 2874 struct map_info *map = mtd->priv; 2875 struct cfi_private *cfi = map->fldrv_priv; 2876 int i, ret; 2877 struct flchip *chip; 2878 2879 for (i = 0; i < cfi->numchips; i++) { 2880 2881 chip = &cfi->chips[i]; 2882 2883 mutex_lock(&chip->mutex); 2884 2885 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN); 2886 if (!ret) { 2887 map_write(map, CMD(0xF0), chip->start); 2888 chip->state = FL_SHUTDOWN; 2889 put_chip(map, chip, chip->start); 2890 } 2891 2892 mutex_unlock(&chip->mutex); 2893 } 2894 2895 return 0; 2896 } 2897 2898 2899 static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val, 2900 void *v) 2901 { 2902 struct mtd_info *mtd; 2903 2904 mtd = container_of(nb, struct mtd_info, reboot_notifier); 2905 cfi_amdstd_reset(mtd); 2906 return NOTIFY_DONE; 2907 } 2908 2909 2910 static void cfi_amdstd_destroy(struct mtd_info *mtd) 2911 { 2912 struct map_info *map = mtd->priv; 2913 struct cfi_private *cfi = map->fldrv_priv; 2914 2915 cfi_amdstd_reset(mtd); 2916 unregister_reboot_notifier(&mtd->reboot_notifier); 2917 kfree(cfi->cmdset_priv); 2918 kfree(cfi->cfiq); 2919 kfree(cfi); 2920 kfree(mtd->eraseregions); 2921 } 2922 2923 MODULE_LICENSE("GPL"); 2924 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al."); 2925 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips"); 2926 MODULE_ALIAS("cfi_cmdset_0006"); 2927 MODULE_ALIAS("cfi_cmdset_0701"); 2928
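/*
 * Illustrative sketch, not part of this driver: a board-specific map driver
 * normally reaches this command set indirectly.  It describes the flash
 * window and lets the generic CFI probe select cfi_cmdset_0002() from the
 * primary vendor command set reported by the chip.  The name, base address,
 * size and bankwidth below are made-up placeholders, not values taken from
 * this file.
 *
 *	static struct map_info board_map = {
 *		.name      = "board-nor",
 *		.phys      = 0x20000000,
 *		.size      = 0x01000000,
 *		.bankwidth = 2,
 *	};
 *
 *	board_map.virt = ioremap(board_map.phys, board_map.size);
 *	simple_map_init(&board_map);
 *	mtd = do_map_probe("cfi_probe", &board_map);
 *	if (mtd)
 *		mtd_device_register(mtd, NULL, 0);
 */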