1 /*
2  * Common Flash Interface support:
3  *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
4  *
5  * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
6  * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
7  * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
8  *
9  * 2_by_8 routines added by Simon Munton
10  *
11  * 4_by_16 work by Carolyn J. Smith
12  *
13  * XIP support hooks by Vitaly Wool (based on code for Intel flash
14  * by Nicolas Pitre)
15  *
16  * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
17  *
18  * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
19  *
20  * This code is GPL
21  */
22 
23 #include <linux/module.h>
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/sched.h>
27 #include <asm/io.h>
28 #include <asm/byteorder.h>
29 
30 #include <linux/errno.h>
31 #include <linux/slab.h>
32 #include <linux/delay.h>
33 #include <linux/interrupt.h>
34 #include <linux/reboot.h>
35 #include <linux/of.h>
36 #include <linux/of_platform.h>
37 #include <linux/mtd/map.h>
38 #include <linux/mtd/mtd.h>
39 #include <linux/mtd/cfi.h>
40 #include <linux/mtd/xip.h>
41 
42 #define AMD_BOOTLOC_BUG
43 #define FORCE_WORD_WRITE 0
44 
45 #define MAX_RETRIES 3
46 
47 #define SST49LF004B		0x0060
48 #define SST49LF040B		0x0050
49 #define SST49LF008A		0x005a
50 #define AT49BV6416		0x00d6
51 
52 /*
53  * Status Register bit description. Used by flash devices that don't
54  * support DQ polling (e.g. HyperFlash)
55  */
56 #define CFI_SR_DRB		BIT(7)
57 #define CFI_SR_ESB		BIT(5)
58 #define CFI_SR_PSB		BIT(4)
59 #define CFI_SR_WBASB		BIT(3)
60 #define CFI_SR_SLSB		BIT(1)
61 
62 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
63 static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
64 static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
65 static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
66 static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
67 static void cfi_amdstd_sync (struct mtd_info *);
68 static int cfi_amdstd_suspend (struct mtd_info *);
69 static void cfi_amdstd_resume (struct mtd_info *);
70 static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
71 static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t,
72 					 size_t *, struct otp_info *);
73 static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t,
74 					 size_t *, struct otp_info *);
75 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
76 static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t,
77 					 size_t *, u_char *);
78 static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t,
79 					 size_t *, u_char *);
80 static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
81 					  size_t *, u_char *);
82 static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t);
83 
84 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
85 				  size_t *retlen, const u_char *buf);
86 
87 static void cfi_amdstd_destroy(struct mtd_info *);
88 
89 struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
90 static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);
91 
92 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
93 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
94 #include "fwh_lock.h"
95 
96 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
97 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
98 
99 static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
100 static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
101 static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
102 
103 static struct mtd_chip_driver cfi_amdstd_chipdrv = {
104 	.probe		= NULL, /* Not usable directly */
105 	.destroy	= cfi_amdstd_destroy,
106 	.name		= "cfi_cmdset_0002",
107 	.module		= THIS_MODULE
108 };
109 
110 /*
111  * Use status register to poll for Erase/write completion when DQ is not
112  * supported. This is indicated by Bit[1:0] of SoftwareFeatures field in
113  * CFI Primary Vendor-Specific Extended Query table 1.5
114  */
115 static int cfi_use_status_reg(struct cfi_private *cfi)
116 {
117 	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
118 	u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;
119 
120 	return extp->MinorVersion >= '5' &&
121 		(extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
122 }
123 
124 static void cfi_check_err_status(struct map_info *map, struct flchip *chip,
125 				 unsigned long adr)
126 {
127 	struct cfi_private *cfi = map->fldrv_priv;
128 	map_word status;
129 
130 	if (!cfi_use_status_reg(cfi))
131 		return;
132 
133 	cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
134 			 cfi->device_type, NULL);
135 	status = map_read(map, adr);
136 
137 	if (map_word_bitsset(map, status, CMD(0x3a))) {
138 		unsigned long chipstatus = MERGESTATUS(status);
139 
140 		if (chipstatus & CFI_SR_ESB)
141 			pr_err("%s erase operation failed, status %lx\n",
142 			       map->name, chipstatus);
143 		if (chipstatus & CFI_SR_PSB)
144 			pr_err("%s program operation failed, status %lx\n",
145 			       map->name, chipstatus);
146 		if (chipstatus & CFI_SR_WBASB)
147 			pr_err("%s buffer program command aborted, status %lx\n",
148 			       map->name, chipstatus);
149 		if (chipstatus & CFI_SR_SLSB)
150 			pr_err("%s sector write protected, status %lx\n",
151 			       map->name, chipstatus);
152 	}
153 }
154 
155 /* #define DEBUG_CFI_FEATURES */
156 
157 
158 #ifdef DEBUG_CFI_FEATURES
159 static void cfi_tell_features(struct cfi_pri_amdstd *extp)
160 {
161 	const char* erase_suspend[3] = {
162 		"Not supported", "Read only", "Read/write"
163 	};
164 	const char* top_bottom[6] = {
165 		"No WP", "8x8KiB sectors at top & bottom, no WP",
166 		"Bottom boot", "Top boot",
167 		"Uniform, Bottom WP", "Uniform, Top WP"
168 	};
169 
170 	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
171 	printk("  Address sensitive unlock: %s\n",
172 	       (extp->SiliconRevision & 1) ? "Not required" : "Required");
173 
174 	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
175 		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
176 	else
177 		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);
178 
179 	if (extp->BlkProt == 0)
180 		printk("  Block protection: Not supported\n");
181 	else
182 		printk("  Block protection: %d sectors per group\n", extp->BlkProt);
183 
184 
185 	printk("  Temporary block unprotect: %s\n",
186 	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
187 	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
188 	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
189 	printk("  Burst mode: %s\n",
190 	       extp->BurstMode ? "Supported" : "Not supported");
191 	if (extp->PageMode == 0)
192 		printk("  Page mode: Not supported\n");
193 	else
194 		printk("  Page mode: %d word page\n", extp->PageMode << 2);
195 
196 	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
197 	       extp->VppMin >> 4, extp->VppMin & 0xf);
198 	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
199 	       extp->VppMax >> 4, extp->VppMax & 0xf);
200 
201 	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
202 		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
203 	else
204 		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
205 }
206 #endif
207 
208 #ifdef AMD_BOOTLOC_BUG
209 /* Wheee. Bring me the head of someone at AMD. */
210 static void fixup_amd_bootblock(struct mtd_info *mtd)
211 {
212 	struct map_info *map = mtd->priv;
213 	struct cfi_private *cfi = map->fldrv_priv;
214 	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
215 	__u8 major = extp->MajorVersion;
216 	__u8 minor = extp->MinorVersion;
217 
218 	if (((major << 8) | minor) < 0x3131) {
219 		/* CFI version 1.0 => don't trust bootloc */
220 
221 		pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
222 			map->name, cfi->mfr, cfi->id);
223 
224 		/* AFAICS all 29LV400 with a bottom boot block have a device ID
225 		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
226 		 * These were badly detected as they have the 0x80 bit set
227 		 * so treat them as a special case.
228 		 */
229 		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&
230 
231 			/* Macronix added CFI to their 2nd generation
232 			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
233 			 * Fujitsu, Spansion, EON, ESI and older Macronix)
234 			 * has CFI.
235 			 *
236 			 * Therefore also check the manufacturer.
237 			 * This reduces the risk of false detection due to
238 			 * the 8-bit device ID.
239 			 */
240 			(cfi->mfr == CFI_MFR_MACRONIX)) {
241 			pr_debug("%s: Macronix MX29LV400C with bottom boot block"
242 				" detected\n", map->name);
243 			extp->TopBottom = 2;	/* bottom boot */
244 		} else
245 		if (cfi->id & 0x80) {
246 			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
247 			extp->TopBottom = 3;	/* top boot */
248 		} else {
249 			extp->TopBottom = 2;	/* bottom boot */
250 		}
251 
252 		pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
253 			" deduced %s from Device ID\n", map->name, major, minor,
254 			extp->TopBottom == 2 ? "bottom" : "top");
255 	}
256 }
257 #endif
258 
259 static void fixup_use_write_buffers(struct mtd_info *mtd)
260 {
261 	struct map_info *map = mtd->priv;
262 	struct cfi_private *cfi = map->fldrv_priv;
263 	if (cfi->cfiq->BufWriteTimeoutTyp) {
264 		pr_debug("Using buffer write method\n");
265 		mtd->_write = cfi_amdstd_write_buffers;
266 	}
267 }
268 
269 /* Atmel chips don't use the same PRI format as AMD chips */
270 static void fixup_convert_atmel_pri(struct mtd_info *mtd)
271 {
272 	struct map_info *map = mtd->priv;
273 	struct cfi_private *cfi = map->fldrv_priv;
274 	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
275 	struct cfi_pri_atmel atmel_pri;
276 
277 	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
278 	memset((char *)extp + 5, 0, sizeof(*extp) - 5);
279 
280 	if (atmel_pri.Features & 0x02)
281 		extp->EraseSuspend = 2;
282 
283 	/* Some chips got it backwards... */
284 	if (cfi->id == AT49BV6416) {
285 		if (atmel_pri.BottomBoot)
286 			extp->TopBottom = 3;
287 		else
288 			extp->TopBottom = 2;
289 	} else {
290 		if (atmel_pri.BottomBoot)
291 			extp->TopBottom = 2;
292 		else
293 			extp->TopBottom = 3;
294 	}
295 
296 	/* burst write mode not supported */
297 	cfi->cfiq->BufWriteTimeoutTyp = 0;
298 	cfi->cfiq->BufWriteTimeoutMax = 0;
299 }
300 
301 static void fixup_use_secsi(struct mtd_info *mtd)
302 {
303 	/* Setup for chips with a secsi area */
304 	mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
305 	mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
306 }
307 
308 static void fixup_use_erase_chip(struct mtd_info *mtd)
309 {
310 	struct map_info *map = mtd->priv;
311 	struct cfi_private *cfi = map->fldrv_priv;
312 	if ((cfi->cfiq->NumEraseRegions == 1) &&
313 		((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
314 		mtd->_erase = cfi_amdstd_erase_chip;
315 	}
316 
317 }
318 
319 /*
320  * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
321  * locked by default.
322  */
323 static void fixup_use_atmel_lock(struct mtd_info *mtd)
324 {
325 	mtd->_lock = cfi_atmel_lock;
326 	mtd->_unlock = cfi_atmel_unlock;
327 	mtd->flags |= MTD_POWERUP_LOCK;
328 }
329 
330 static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
331 {
332 	struct map_info *map = mtd->priv;
333 	struct cfi_private *cfi = map->fldrv_priv;
334 
335 	/*
336 	 * These flashes report two separate eraseblock regions based on the
337 	 * sector_erase-size and block_erase-size, although they both operate on the
338 	 * same memory. This is not allowed according to CFI, so we just pick the
339 	 * sector_erase-size.
340 	 */
341 	cfi->cfiq->NumEraseRegions = 1;
342 }
343 
344 static void fixup_sst39vf(struct mtd_info *mtd)
345 {
346 	struct map_info *map = mtd->priv;
347 	struct cfi_private *cfi = map->fldrv_priv;
348 
349 	fixup_old_sst_eraseregion(mtd);
350 
351 	cfi->addr_unlock1 = 0x5555;
352 	cfi->addr_unlock2 = 0x2AAA;
353 }
354 
355 static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
356 {
357 	struct map_info *map = mtd->priv;
358 	struct cfi_private *cfi = map->fldrv_priv;
359 
360 	fixup_old_sst_eraseregion(mtd);
361 
362 	cfi->addr_unlock1 = 0x555;
363 	cfi->addr_unlock2 = 0x2AA;
364 
365 	cfi->sector_erase_cmd = CMD(0x50);
366 }
367 
368 static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
369 {
370 	struct map_info *map = mtd->priv;
371 	struct cfi_private *cfi = map->fldrv_priv;
372 
373 	fixup_sst39vf_rev_b(mtd);
374 
375 	/*
376 	 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
377 	 * it should report a size of 8KBytes (0x0020*256).
378 	 */
379 	cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
380 	pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n",
381 		mtd->name);
382 }
383 
384 static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
385 {
386 	struct map_info *map = mtd->priv;
387 	struct cfi_private *cfi = map->fldrv_priv;
388 
389 	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
390 		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
391 		pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n",
392 			mtd->name);
393 	}
394 }
395 
396 static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
397 {
398 	struct map_info *map = mtd->priv;
399 	struct cfi_private *cfi = map->fldrv_priv;
400 
401 	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
402 		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
403 		pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n",
404 			mtd->name);
405 	}
406 }
407 
408 static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
409 {
410 	struct map_info *map = mtd->priv;
411 	struct cfi_private *cfi = map->fldrv_priv;
412 
413 	/*
414 	 *  S29NS512P flash uses more than 8bits to report number of sectors,
415 	 * which is not permitted by CFI.
416 	 */
417 	cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
418 	pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n",
419 		mtd->name);
420 }
421 
422 /* Used to fix CFI-Tables of chips without Extended Query Tables */
423 static struct cfi_fixup cfi_nopri_fixup_table[] = {
424 	{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
425 	{ CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
426 	{ CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
427 	{ CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
428 	{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
429 	{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
430 	{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
431 	{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
432 	{ 0, 0, NULL }
433 };
434 
435 static struct cfi_fixup cfi_fixup_table[] = {
436 	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
437 #ifdef AMD_BOOTLOC_BUG
438 	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
439 	{ CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
440 	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
441 #endif
442 	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
443 	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
444 	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
445 	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
446 	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
447 	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
448 	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
449 	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
450 	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
451 	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
452 	{ CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
453 	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
454 	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
455 	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
456 	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
457 #if !FORCE_WORD_WRITE
458 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
459 #endif
460 	{ 0, 0, NULL }
461 };
462 static struct cfi_fixup jedec_fixup_table[] = {
463 	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
464 	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
465 	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
466 	{ 0, 0, NULL }
467 };
468 
469 static struct cfi_fixup fixup_table[] = {
470 	/* The CFI vendor ids and the JEDEC vendor IDs appear
471 	 * to be common.  It is like the devices id's are as
472 	 * well.  This table is to pick all cases where
473 	 * we know that is the case.
474 	 */
475 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
476 	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
477 	{ 0, 0, NULL }
478 };
479 
480 
481 static void cfi_fixup_major_minor(struct cfi_private *cfi,
482 				  struct cfi_pri_amdstd *extp)
483 {
484 	if (cfi->mfr == CFI_MFR_SAMSUNG) {
485 		if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
486 		    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
487 			/*
488 			 * Samsung K8P2815UQB and K8D6x16UxM chips
489 			 * report major=0 / minor=0.
490 			 * K8D3x16UxC chips report major=3 / minor=3.
491 			 */
492 			printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
493 			       " Extended Query version to 1.%c\n",
494 			       extp->MinorVersion);
495 			extp->MajorVersion = '1';
496 		}
497 	}
498 
499 	/*
500 	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
501 	 */
502 	if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
503 		extp->MajorVersion = '1';
504 		extp->MinorVersion = '0';
505 	}
506 }
507 
508 static int is_m29ew(struct cfi_private *cfi)
509 {
510 	if (cfi->mfr == CFI_MFR_INTEL &&
511 	    ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
512 	     (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
513 		return 1;
514 	return 0;
515 }
516 
517 /*
518  * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
519  * Some revisions of the M29EW suffer from erase suspend hang ups. In
520  * particular, it can occur when the sequence
521  * Erase Confirm -> Suspend -> Program -> Resume
522  * causes a lockup due to internal timing issues. The consequence is that the
523  * erase cannot be resumed without inserting a dummy command after programming
524  * and prior to resuming. [...] The work-around is to issue a dummy write cycle
525  * that writes an F0 command code before the RESUME command.
526  */
527 static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
528 					  unsigned long adr)
529 {
530 	struct cfi_private *cfi = map->fldrv_priv;
531 	/* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
532 	if (is_m29ew(cfi))
533 		map_write(map, CMD(0xF0), adr);
534 }
535 
536 /*
537  * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
538  *
539  * Some revisions of the M29EW (for example, A1 and A2 step revisions)
540  * are affected by a problem that could cause a hang up when an ERASE SUSPEND
541  * command is issued after an ERASE RESUME operation without waiting for a
542  * minimum delay.  The result is that once the ERASE seems to be completed
543  * (no bits are toggling), the contents of the Flash memory block on which
544  * the erase was ongoing could be inconsistent with the expected values
545  * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
546  * values), causing a consequent failure of the ERASE operation.
547  * The occurrence of this issue could be high, especially when file system
548  * operations on the Flash are intensive.  As a result, it is recommended
549  * that a patch be applied.  Intensive file system operations can cause many
550  * calls to the garbage routine to free Flash space (also by erasing physical
551  * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
552  * commands can occur.  The problem disappears when a delay is inserted after
553  * the RESUME command by using the udelay() function available in Linux.
554  * The DELAY value must be tuned based on the customer's platform.
555  * The maximum value that fixes the problem in all cases is 500us.
556  * But, in our experience, a delay of 30 µs to 50 µs is sufficient
557  * in most cases.
558  * We have chosen 500µs because this latency is acceptable.
559  */
560 static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
561 {
562 	/*
563 	 * Resolving the Delay After Resume Issue see Micron TN-13-07
564 	 * Worst case delay must be 500µs but 30-50µs should be ok as well
565 	 */
566 	if (is_m29ew(cfi))
567 		cfi_udelay(500);
568 }
569 
570 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
571 {
572 	struct cfi_private *cfi = map->fldrv_priv;
573 	struct device_node __maybe_unused *np = map->device_node;
574 	struct mtd_info *mtd;
575 	int i;
576 
577 	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
578 	if (!mtd)
579 		return NULL;
580 	mtd->priv = map;
581 	mtd->type = MTD_NORFLASH;
582 
583 	/* Fill in the default mtd operations */
584 	mtd->_erase   = cfi_amdstd_erase_varsize;
585 	mtd->_write   = cfi_amdstd_write_words;
586 	mtd->_read    = cfi_amdstd_read;
587 	mtd->_sync    = cfi_amdstd_sync;
588 	mtd->_suspend = cfi_amdstd_suspend;
589 	mtd->_resume  = cfi_amdstd_resume;
590 	mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg;
591 	mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg;
592 	mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info;
593 	mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info;
594 	mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg;
595 	mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg;
596 	mtd->flags   = MTD_CAP_NORFLASH;
597 	mtd->name    = map->name;
598 	mtd->writesize = 1;
599 	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
600 
601 	pr_debug("MTD %s(): write buffer size %d\n", __func__,
602 			mtd->writebufsize);
603 
604 	mtd->_panic_write = cfi_amdstd_panic_write;
605 	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
606 
607 	if (cfi->cfi_mode==CFI_MODE_CFI){
608 		unsigned char bootloc;
609 		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
610 		struct cfi_pri_amdstd *extp;
611 
612 		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
613 		if (extp) {
614 			/*
615 			 * It's a real CFI chip, not one for which the probe
616 			 * routine faked a CFI structure.
617 			 */
618 			cfi_fixup_major_minor(cfi, extp);
619 
620 			/*
621 			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
622 			 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
623 			 *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
624 			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
625 			 *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
626 			 */
627 			if (extp->MajorVersion != '1' ||
628 			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
629 				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
630 				       "version %c.%c (%#02x/%#02x).\n",
631 				       extp->MajorVersion, extp->MinorVersion,
632 				       extp->MajorVersion, extp->MinorVersion);
633 				kfree(extp);
634 				kfree(mtd);
635 				return NULL;
636 			}
637 
638 			printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
639 			       extp->MajorVersion, extp->MinorVersion);
640 
641 			/* Install our own private info structure */
642 			cfi->cmdset_priv = extp;
643 
644 			/* Apply cfi device specific fixups */
645 			cfi_fixup(mtd, cfi_fixup_table);
646 
647 #ifdef DEBUG_CFI_FEATURES
648 			/* Tell the user about it in lots of lovely detail */
649 			cfi_tell_features(extp);
650 #endif
651 
652 #ifdef CONFIG_OF
653 			if (np && of_property_read_bool(
654 				    np, "use-advanced-sector-protection")
655 			    && extp->BlkProtUnprot == 8) {
656 				printk(KERN_INFO "  Advanced Sector Protection (PPB Locking) supported\n");
657 				mtd->_lock = cfi_ppb_lock;
658 				mtd->_unlock = cfi_ppb_unlock;
659 				mtd->_is_locked = cfi_ppb_is_locked;
660 			}
661 #endif
662 
663 			bootloc = extp->TopBottom;
664 			if ((bootloc < 2) || (bootloc > 5)) {
665 				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
666 				       "bank location (%d). Assuming bottom.\n",
667 				       map->name, bootloc);
668 				bootloc = 2;
669 			}
670 
671 			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
672 				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);
673 
674 				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
675 					int j = (cfi->cfiq->NumEraseRegions-1)-i;
676 
677 					swap(cfi->cfiq->EraseRegionInfo[i],
678 					     cfi->cfiq->EraseRegionInfo[j]);
679 				}
680 			}
681 			/* Set the default CFI lock/unlock addresses */
682 			cfi->addr_unlock1 = 0x555;
683 			cfi->addr_unlock2 = 0x2aa;
684 		}
685 		cfi_fixup(mtd, cfi_nopri_fixup_table);
686 
687 		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
688 			kfree(mtd);
689 			return NULL;
690 		}
691 
692 	} /* CFI mode */
693 	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
694 		/* Apply jedec specific fixups */
695 		cfi_fixup(mtd, jedec_fixup_table);
696 	}
697 	/* Apply generic fixups */
698 	cfi_fixup(mtd, fixup_table);
699 
700 	for (i=0; i< cfi->numchips; i++) {
701 		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
702 		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
703 		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
704 		/*
705 		 * First calculate the timeout max according to timeout field
706 		 * of struct cfi_ident that probed from chip's CFI aera, if
707 		 * available. Specify a minimum of 2000us, in case the CFI data
708 		 * is wrong.
709 		 */
710 		if (cfi->cfiq->BufWriteTimeoutTyp &&
711 		    cfi->cfiq->BufWriteTimeoutMax)
712 			cfi->chips[i].buffer_write_time_max =
713 				1 << (cfi->cfiq->BufWriteTimeoutTyp +
714 				      cfi->cfiq->BufWriteTimeoutMax);
715 		else
716 			cfi->chips[i].buffer_write_time_max = 0;
717 
718 		cfi->chips[i].buffer_write_time_max =
719 			max(cfi->chips[i].buffer_write_time_max, 2000);
720 
721 		cfi->chips[i].ref_point_counter = 0;
722 		init_waitqueue_head(&(cfi->chips[i].wq));
723 	}
724 
725 	map->fldrv = &cfi_amdstd_chipdrv;
726 
727 	return cfi_amdstd_setup(mtd);
728 }
729 struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
730 struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
731 EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
732 EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
733 EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
734 
735 static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
736 {
737 	struct map_info *map = mtd->priv;
738 	struct cfi_private *cfi = map->fldrv_priv;
739 	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
740 	unsigned long offset = 0;
741 	int i,j;
742 
743 	printk(KERN_NOTICE "number of %s chips: %d\n",
744 	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
745 	/* Select the correct geometry setup */
746 	mtd->size = devsize * cfi->numchips;
747 
748 	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
749 	mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
750 					  sizeof(struct mtd_erase_region_info),
751 					  GFP_KERNEL);
752 	if (!mtd->eraseregions)
753 		goto setup_err;
754 
755 	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
756 		unsigned long ernum, ersize;
757 		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
758 		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
759 
760 		if (mtd->erasesize < ersize) {
761 			mtd->erasesize = ersize;
762 		}
763 		for (j=0; j<cfi->numchips; j++) {
764 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
765 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
766 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
767 		}
768 		offset += (ersize * ernum);
769 	}
770 	if (offset != devsize) {
771 		/* Argh */
772 		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
773 		goto setup_err;
774 	}
775 
776 	__module_get(THIS_MODULE);
777 	register_reboot_notifier(&mtd->reboot_notifier);
778 	return mtd;
779 
780  setup_err:
781 	kfree(mtd->eraseregions);
782 	kfree(mtd);
783 	kfree(cfi->cmdset_priv);
784 	kfree(cfi->cfiq);
785 	return NULL;
786 }
787 
788 /*
789  * Return true if the chip is ready.
790  *
791  * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
792  * non-suspended sector) and is indicated by no toggle bits toggling.
793  *
794  * Note that anything more complicated than checking if no bits are toggling
795  * (including checking DQ5 for an error status) is tricky to get working
796  * correctly and is therefore not done	(particularly with interleaved chips
797  * as each chip must be checked independently of the others).
798  */
799 static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
800 			       unsigned long addr)
801 {
802 	struct cfi_private *cfi = map->fldrv_priv;
803 	map_word d, t;
804 
805 	if (cfi_use_status_reg(cfi)) {
806 		map_word ready = CMD(CFI_SR_DRB);
807 		/*
808 		 * For chips that support status register, check device
809 		 * ready bit
810 		 */
811 		cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
812 				 cfi->device_type, NULL);
813 		d = map_read(map, addr);
814 
815 		return map_word_andequal(map, d, ready, ready);
816 	}
817 
818 	d = map_read(map, addr);
819 	t = map_read(map, addr);
820 
821 	return map_word_equal(map, d, t);
822 }
823 
824 /*
825  * Return true if the chip is ready and has the correct value.
826  *
827  * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
828  * non-suspended sector) and it is indicated by no bits toggling.
829  *
830  * Error are indicated by toggling bits or bits held with the wrong value,
831  * or with bits toggling.
832  *
833  * Note that anything more complicated than checking if no bits are toggling
834  * (including checking DQ5 for an error status) is tricky to get working
835  * correctly and is therefore not done	(particularly with interleaved chips
836  * as each chip must be checked independently of the others).
837  *
838  */
839 static int __xipram chip_good(struct map_info *map, struct flchip *chip,
840 			      unsigned long addr, map_word expected)
841 {
842 	struct cfi_private *cfi = map->fldrv_priv;
843 	map_word oldd, curd;
844 
845 	if (cfi_use_status_reg(cfi)) {
846 		map_word ready = CMD(CFI_SR_DRB);
847 		map_word err = CMD(CFI_SR_PSB | CFI_SR_ESB);
848 		/*
849 		 * For chips that support status register, check device
850 		 * ready bit and Erase/Program status bit to know if
851 		 * operation succeeded.
852 		 */
853 		cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
854 				 cfi->device_type, NULL);
855 		curd = map_read(map, addr);
856 
857 		if (map_word_andequal(map, curd, ready, ready))
858 			return !map_word_bitsset(map, curd, err);
859 
860 		return 0;
861 	}
862 
863 	oldd = map_read(map, addr);
864 	curd = map_read(map, addr);
865 
866 	return	map_word_equal(map, oldd, curd) &&
867 		map_word_equal(map, curd, expected);
868 }
869 
870 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
871 {
872 	DECLARE_WAITQUEUE(wait, current);
873 	struct cfi_private *cfi = map->fldrv_priv;
874 	unsigned long timeo;
875 	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;
876 
877  resettime:
878 	timeo = jiffies + HZ;
879  retry:
880 	switch (chip->state) {
881 
882 	case FL_STATUS:
883 		for (;;) {
884 			if (chip_ready(map, chip, adr))
885 				break;
886 
887 			if (time_after(jiffies, timeo)) {
888 				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
889 				return -EIO;
890 			}
891 			mutex_unlock(&chip->mutex);
892 			cfi_udelay(1);
893 			mutex_lock(&chip->mutex);
894 			/* Someone else might have been playing with it. */
895 			goto retry;
896 		}
897 
898 	case FL_READY:
899 	case FL_CFI_QUERY:
900 	case FL_JEDEC_QUERY:
901 		return 0;
902 
903 	case FL_ERASING:
904 		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
905 		    !(mode == FL_READY || mode == FL_POINT ||
906 		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
907 			goto sleep;
908 
909 		/* Do not allow suspend iff read/write to EB address */
910 		if ((adr & chip->in_progress_block_mask) ==
911 		    chip->in_progress_block_addr)
912 			goto sleep;
913 
914 		/* Erase suspend */
915 		/* It's harmless to issue the Erase-Suspend and Erase-Resume
916 		 * commands when the erase algorithm isn't in progress. */
917 		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
918 		chip->oldstate = FL_ERASING;
919 		chip->state = FL_ERASE_SUSPENDING;
920 		chip->erase_suspended = 1;
921 		for (;;) {
922 			if (chip_ready(map, chip, adr))
923 				break;
924 
925 			if (time_after(jiffies, timeo)) {
926 				/* Should have suspended the erase by now.
927 				 * Send an Erase-Resume command as either
928 				 * there was an error (so leave the erase
929 				 * routine to recover from it) or we trying to
930 				 * use the erase-in-progress sector. */
931 				put_chip(map, chip, adr);
932 				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
933 				return -EIO;
934 			}
935 
936 			mutex_unlock(&chip->mutex);
937 			cfi_udelay(1);
938 			mutex_lock(&chip->mutex);
939 			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
940 			   So we can just loop here. */
941 		}
942 		chip->state = FL_READY;
943 		return 0;
944 
945 	case FL_XIP_WHILE_ERASING:
946 		if (mode != FL_READY && mode != FL_POINT &&
947 		    (!cfip || !(cfip->EraseSuspend&2)))
948 			goto sleep;
949 		chip->oldstate = chip->state;
950 		chip->state = FL_READY;
951 		return 0;
952 
953 	case FL_SHUTDOWN:
954 		/* The machine is rebooting */
955 		return -EIO;
956 
957 	case FL_POINT:
958 		/* Only if there's no operation suspended... */
959 		if (mode == FL_READY && chip->oldstate == FL_READY)
960 			return 0;
961 		/* fall through */
962 
963 	default:
964 	sleep:
965 		set_current_state(TASK_UNINTERRUPTIBLE);
966 		add_wait_queue(&chip->wq, &wait);
967 		mutex_unlock(&chip->mutex);
968 		schedule();
969 		remove_wait_queue(&chip->wq, &wait);
970 		mutex_lock(&chip->mutex);
971 		goto resettime;
972 	}
973 }
974 
975 
976 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
977 {
978 	struct cfi_private *cfi = map->fldrv_priv;
979 
980 	switch(chip->oldstate) {
981 	case FL_ERASING:
982 		cfi_fixup_m29ew_erase_suspend(map,
983 			chip->in_progress_block_addr);
984 		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
985 		cfi_fixup_m29ew_delay_after_resume(cfi);
986 		chip->oldstate = FL_READY;
987 		chip->state = FL_ERASING;
988 		break;
989 
990 	case FL_XIP_WHILE_ERASING:
991 		chip->state = chip->oldstate;
992 		chip->oldstate = FL_READY;
993 		break;
994 
995 	case FL_READY:
996 	case FL_STATUS:
997 		break;
998 	default:
999 		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
1000 	}
1001 	wake_up(&chip->wq);
1002 }
1003 
1004 #ifdef CONFIG_MTD_XIP
1005 
1006 /*
1007  * No interrupt what so ever can be serviced while the flash isn't in array
1008  * mode.  This is ensured by the xip_disable() and xip_enable() functions
1009  * enclosing any code path where the flash is known not to be in array mode.
1010  * And within a XIP disabled code path, only functions marked with __xipram
1011  * may be called and nothing else (it's a good thing to inspect generated
1012  * assembly to make sure inline functions were actually inlined and that gcc
1013  * didn't emit calls to its own support functions). Also configuring MTD CFI
1014  * support to a single buswidth and a single interleave is also recommended.
1015  */
1016 
1017 static void xip_disable(struct map_info *map, struct flchip *chip,
1018 			unsigned long adr)
1019 {
1020 	/* TODO: chips with no XIP use should ignore and return */
1021 	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
1022 	local_irq_disable();
1023 }
1024 
1025 static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
1026 				unsigned long adr)
1027 {
1028 	struct cfi_private *cfi = map->fldrv_priv;
1029 
1030 	if (chip->state != FL_POINT && chip->state != FL_READY) {
1031 		map_write(map, CMD(0xf0), adr);
1032 		chip->state = FL_READY;
1033 	}
1034 	(void) map_read(map, adr);
1035 	xip_iprefetch();
1036 	local_irq_enable();
1037 }
1038 
1039 /*
1040  * When a delay is required for the flash operation to complete, the
1041  * xip_udelay() function is polling for both the given timeout and pending
1042  * (but still masked) hardware interrupts.  Whenever there is an interrupt
1043  * pending then the flash erase operation is suspended, array mode restored
1044  * and interrupts unmasked.  Task scheduling might also happen at that
1045  * point.  The CPU eventually returns from the interrupt or the call to
1046  * schedule() and the suspended flash operation is resumed for the remaining
1047  * of the delay period.
1048  *
1049  * Warning: this function _will_ fool interrupt latency tracing tools.
1050  */
1051 
1052 static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
1053 				unsigned long adr, int usec)
1054 {
1055 	struct cfi_private *cfi = map->fldrv_priv;
1056 	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
1057 	map_word status, OK = CMD(0x80);
1058 	unsigned long suspended, start = xip_currtime();
1059 	flstate_t oldstate;
1060 
1061 	do {
1062 		cpu_relax();
1063 		if (xip_irqpending() && extp &&
1064 		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
1065 		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
1066 			/*
1067 			 * Let's suspend the erase operation when supported.
1068 			 * Note that we currently don't try to suspend
1069 			 * interleaved chips if there is already another
1070 			 * operation suspended (imagine what happens
1071 			 * when one chip was already done with the current
1072 			 * operation while another chip suspended it, then
1073 			 * we resume the whole thing at once).  Yes, it
1074 			 * can happen!
1075 			 */
1076 			map_write(map, CMD(0xb0), adr);
1077 			usec -= xip_elapsed_since(start);
1078 			suspended = xip_currtime();
1079 			do {
1080 				if (xip_elapsed_since(suspended) > 100000) {
1081 					/*
1082 					 * The chip doesn't want to suspend
1083 					 * after waiting for 100 msecs.
1084 					 * This is a critical error but there
1085 					 * is not much we can do here.
1086 					 */
1087 					return;
1088 				}
1089 				status = map_read(map, adr);
1090 			} while (!map_word_andequal(map, status, OK, OK));
1091 
1092 			/* Suspend succeeded */
1093 			oldstate = chip->state;
1094 			if (!map_word_bitsset(map, status, CMD(0x40)))
1095 				break;
1096 			chip->state = FL_XIP_WHILE_ERASING;
1097 			chip->erase_suspended = 1;
1098 			map_write(map, CMD(0xf0), adr);
1099 			(void) map_read(map, adr);
1100 			xip_iprefetch();
1101 			local_irq_enable();
1102 			mutex_unlock(&chip->mutex);
1103 			xip_iprefetch();
1104 			cond_resched();
1105 
1106 			/*
1107 			 * We're back.  However someone else might have
1108 			 * decided to go write to the chip if we are in
1109 			 * a suspended erase state.  If so let's wait
1110 			 * until it's done.
1111 			 */
1112 			mutex_lock(&chip->mutex);
1113 			while (chip->state != FL_XIP_WHILE_ERASING) {
1114 				DECLARE_WAITQUEUE(wait, current);
1115 				set_current_state(TASK_UNINTERRUPTIBLE);
1116 				add_wait_queue(&chip->wq, &wait);
1117 				mutex_unlock(&chip->mutex);
1118 				schedule();
1119 				remove_wait_queue(&chip->wq, &wait);
1120 				mutex_lock(&chip->mutex);
1121 			}
1122 			/* Disallow XIP again */
1123 			local_irq_disable();
1124 
1125 			/* Correct Erase Suspend Hangups for M29EW */
1126 			cfi_fixup_m29ew_erase_suspend(map, adr);
1127 			/* Resume the write or erase operation */
1128 			map_write(map, cfi->sector_erase_cmd, adr);
1129 			chip->state = oldstate;
1130 			start = xip_currtime();
1131 		} else if (usec >= 1000000/HZ) {
1132 			/*
1133 			 * Try to save on CPU power when waiting delay
1134 			 * is at least a system timer tick period.
1135 			 * No need to be extremely accurate here.
1136 			 */
1137 			xip_cpu_idle();
1138 		}
1139 		status = map_read(map, adr);
1140 	} while (!map_word_andequal(map, status, OK, OK)
1141 		 && xip_elapsed_since(start) < usec);
1142 }
1143 
1144 #define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)
1145 
1146 /*
1147  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1148  * the flash is actively programming or erasing since we have to poll for
1149  * the operation to complete anyway.  We can't do that in a generic way with
1150  * a XIP setup so do it before the actual flash operation in this case
1151  * and stub it out from INVALIDATE_CACHE_UDELAY.
1152  */
1153 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1154 	INVALIDATE_CACHED_RANGE(map, from, size)
1155 
1156 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
1157 	UDELAY(map, chip, adr, usec)
1158 
1159 /*
1160  * Extra notes:
1161  *
1162  * Activating this XIP support changes the way the code works a bit.  For
1163  * example the code to suspend the current process when concurrent access
1164  * happens is never executed because xip_udelay() will always return with the
1165  * same chip state as it was entered with.  This is why there is no care for
1166  * the presence of add_wait_queue() or schedule() calls from within a couple
1167  * xip_disable()'d  areas of code, like in do_erase_oneblock for example.
1168  * The queueing and scheduling are always happening within xip_udelay().
1169  *
1170  * Similarly, get_chip() and put_chip() just happen to always be executed
1171  * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
1172  * is in array mode, therefore never executing many cases therein and not
1173  * causing any problem with XIP.
1174  */
1175 
1176 #else
1177 
1178 #define xip_disable(map, chip, adr)
1179 #define xip_enable(map, chip, adr)
1180 #define XIP_INVAL_CACHED_RANGE(x...)
1181 
1182 #define UDELAY(map, chip, adr, usec)  \
1183 do {  \
1184 	mutex_unlock(&chip->mutex);  \
1185 	cfi_udelay(usec);  \
1186 	mutex_lock(&chip->mutex);  \
1187 } while (0)
1188 
1189 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
1190 do {  \
1191 	mutex_unlock(&chip->mutex);  \
1192 	INVALIDATE_CACHED_RANGE(map, adr, len);  \
1193 	cfi_udelay(usec);  \
1194 	mutex_lock(&chip->mutex);  \
1195 } while (0)
1196 
1197 #endif
1198 
1199 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1200 {
1201 	unsigned long cmd_addr;
1202 	struct cfi_private *cfi = map->fldrv_priv;
1203 	int ret;
1204 
1205 	adr += chip->start;
1206 
1207 	/* Ensure cmd read/writes are aligned. */
1208 	cmd_addr = adr & ~(map_bankwidth(map)-1);
1209 
1210 	mutex_lock(&chip->mutex);
1211 	ret = get_chip(map, chip, cmd_addr, FL_READY);
1212 	if (ret) {
1213 		mutex_unlock(&chip->mutex);
1214 		return ret;
1215 	}
1216 
1217 	if (chip->state != FL_POINT && chip->state != FL_READY) {
1218 		map_write(map, CMD(0xf0), cmd_addr);
1219 		chip->state = FL_READY;
1220 	}
1221 
1222 	map_copy_from(map, buf, adr, len);
1223 
1224 	put_chip(map, chip, cmd_addr);
1225 
1226 	mutex_unlock(&chip->mutex);
1227 	return 0;
1228 }
1229 
1230 
1231 static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1232 {
1233 	struct map_info *map = mtd->priv;
1234 	struct cfi_private *cfi = map->fldrv_priv;
1235 	unsigned long ofs;
1236 	int chipnum;
1237 	int ret = 0;
1238 
1239 	/* ofs: offset within the first chip that the first read should start */
1240 	chipnum = (from >> cfi->chipshift);
1241 	ofs = from - (chipnum <<  cfi->chipshift);
1242 
1243 	while (len) {
1244 		unsigned long thislen;
1245 
1246 		if (chipnum >= cfi->numchips)
1247 			break;
1248 
1249 		if ((len + ofs -1) >> cfi->chipshift)
1250 			thislen = (1<<cfi->chipshift) - ofs;
1251 		else
1252 			thislen = len;
1253 
1254 		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1255 		if (ret)
1256 			break;
1257 
1258 		*retlen += thislen;
1259 		len -= thislen;
1260 		buf += thislen;
1261 
1262 		ofs = 0;
1263 		chipnum++;
1264 	}
1265 	return ret;
1266 }
1267 
1268 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
1269 			loff_t adr, size_t len, u_char *buf, size_t grouplen);
1270 
1271 static inline void otp_enter(struct map_info *map, struct flchip *chip,
1272 			     loff_t adr, size_t len)
1273 {
1274 	struct cfi_private *cfi = map->fldrv_priv;
1275 
1276 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1277 			 cfi->device_type, NULL);
1278 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1279 			 cfi->device_type, NULL);
1280 	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi,
1281 			 cfi->device_type, NULL);
1282 
1283 	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
1284 }
1285 
1286 static inline void otp_exit(struct map_info *map, struct flchip *chip,
1287 			    loff_t adr, size_t len)
1288 {
1289 	struct cfi_private *cfi = map->fldrv_priv;
1290 
1291 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1292 			 cfi->device_type, NULL);
1293 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1294 			 cfi->device_type, NULL);
1295 	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
1296 			 cfi->device_type, NULL);
1297 	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi,
1298 			 cfi->device_type, NULL);
1299 
1300 	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
1301 }
1302 
1303 static inline int do_read_secsi_onechip(struct map_info *map,
1304 					struct flchip *chip, loff_t adr,
1305 					size_t len, u_char *buf,
1306 					size_t grouplen)
1307 {
1308 	DECLARE_WAITQUEUE(wait, current);
1309 
1310  retry:
1311 	mutex_lock(&chip->mutex);
1312 
1313 	if (chip->state != FL_READY){
1314 		set_current_state(TASK_UNINTERRUPTIBLE);
1315 		add_wait_queue(&chip->wq, &wait);
1316 
1317 		mutex_unlock(&chip->mutex);
1318 
1319 		schedule();
1320 		remove_wait_queue(&chip->wq, &wait);
1321 
1322 		goto retry;
1323 	}
1324 
1325 	adr += chip->start;
1326 
1327 	chip->state = FL_READY;
1328 
1329 	otp_enter(map, chip, adr, len);
1330 	map_copy_from(map, buf, adr, len);
1331 	otp_exit(map, chip, adr, len);
1332 
1333 	wake_up(&chip->wq);
1334 	mutex_unlock(&chip->mutex);
1335 
1336 	return 0;
1337 }
1338 
1339 static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1340 {
1341 	struct map_info *map = mtd->priv;
1342 	struct cfi_private *cfi = map->fldrv_priv;
1343 	unsigned long ofs;
1344 	int chipnum;
1345 	int ret = 0;
1346 
1347 	/* ofs: offset within the first chip that the first read should start */
1348 	/* 8 secsi bytes per chip */
1349 	chipnum=from>>3;
1350 	ofs=from & 7;
1351 
1352 	while (len) {
1353 		unsigned long thislen;
1354 
1355 		if (chipnum >= cfi->numchips)
1356 			break;
1357 
1358 		if ((len + ofs -1) >> 3)
1359 			thislen = (1<<3) - ofs;
1360 		else
1361 			thislen = len;
1362 
1363 		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs,
1364 					    thislen, buf, 0);
1365 		if (ret)
1366 			break;
1367 
1368 		*retlen += thislen;
1369 		len -= thislen;
1370 		buf += thislen;
1371 
1372 		ofs = 0;
1373 		chipnum++;
1374 	}
1375 	return ret;
1376 }
1377 
1378 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1379 				     unsigned long adr, map_word datum,
1380 				     int mode);
1381 
1382 static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
1383 			size_t len, u_char *buf, size_t grouplen)
1384 {
1385 	int ret;
1386 	while (len) {
1387 		unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1);
1388 		int gap = adr - bus_ofs;
1389 		int n = min_t(int, len, map_bankwidth(map) - gap);
1390 		map_word datum = map_word_ff(map);
1391 
1392 		if (n != map_bankwidth(map)) {
1393 			/* partial write of a word, load old contents */
1394 			otp_enter(map, chip, bus_ofs, map_bankwidth(map));
1395 			datum = map_read(map, bus_ofs);
1396 			otp_exit(map, chip, bus_ofs, map_bankwidth(map));
1397 		}
1398 
1399 		datum = map_word_load_partial(map, datum, buf, gap, n);
1400 		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
1401 		if (ret)
1402 			return ret;
1403 
1404 		adr += n;
1405 		buf += n;
1406 		len -= n;
1407 	}
1408 
1409 	return 0;
1410 }
1411 
1412 static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
1413 		       size_t len, u_char *buf, size_t grouplen)
1414 {
1415 	struct cfi_private *cfi = map->fldrv_priv;
1416 	uint8_t lockreg;
1417 	unsigned long timeo;
1418 	int ret;
1419 
1420 	/* make sure area matches group boundaries */
1421 	if ((adr != 0) || (len != grouplen))
1422 		return -EINVAL;
1423 
1424 	mutex_lock(&chip->mutex);
1425 	ret = get_chip(map, chip, chip->start, FL_LOCKING);
1426 	if (ret) {
1427 		mutex_unlock(&chip->mutex);
1428 		return ret;
1429 	}
1430 	chip->state = FL_LOCKING;
1431 
1432 	/* Enter lock register command */
1433 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1434 			 cfi->device_type, NULL);
1435 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1436 			 cfi->device_type, NULL);
1437 	cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi,
1438 			 cfi->device_type, NULL);
1439 
1440 	/* read lock register */
1441 	lockreg = cfi_read_query(map, 0);
1442 
1443 	/* set bit 0 to protect extended memory block */
1444 	lockreg &= ~0x01;
1445 
1446 	/* set bit 0 to protect extended memory block */
1447 	/* write lock register */
1448 	map_write(map, CMD(0xA0), chip->start);
1449 	map_write(map, CMD(lockreg), chip->start);
1450 
1451 	/* wait for chip to become ready */
1452 	timeo = jiffies + msecs_to_jiffies(2);
1453 	for (;;) {
1454 		if (chip_ready(map, chip, adr))
1455 			break;
1456 
1457 		if (time_after(jiffies, timeo)) {
1458 			pr_err("Waiting for chip to be ready timed out.\n");
1459 			ret = -EIO;
1460 			break;
1461 		}
1462 		UDELAY(map, chip, 0, 1);
1463 	}
1464 
1465 	/* exit protection commands */
1466 	map_write(map, CMD(0x90), chip->start);
1467 	map_write(map, CMD(0x00), chip->start);
1468 
1469 	chip->state = FL_READY;
1470 	put_chip(map, chip, chip->start);
1471 	mutex_unlock(&chip->mutex);
1472 
1473 	return ret;
1474 }
1475 
1476 static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
1477 			       size_t *retlen, u_char *buf,
1478 			       otp_op_t action, int user_regs)
1479 {
1480 	struct map_info *map = mtd->priv;
1481 	struct cfi_private *cfi = map->fldrv_priv;
1482 	int ofs_factor = cfi->interleave * cfi->device_type;
1483 	unsigned long base;
1484 	int chipnum;
1485 	struct flchip *chip;
1486 	uint8_t otp, lockreg;
1487 	int ret;
1488 
1489 	size_t user_size, factory_size, otpsize;
1490 	loff_t user_offset, factory_offset, otpoffset;
1491 	int user_locked = 0, otplocked;
1492 
1493 	*retlen = 0;
1494 
1495 	for (chipnum = 0; chipnum < cfi->numchips; chipnum++) {
1496 		chip = &cfi->chips[chipnum];
1497 		factory_size = 0;
1498 		user_size = 0;
1499 
1500 		/* Micron M29EW family */
1501 		if (is_m29ew(cfi)) {
1502 			base = chip->start;
1503 
1504 			/* check whether secsi area is factory locked
1505 			   or user lockable */
1506 			mutex_lock(&chip->mutex);
1507 			ret = get_chip(map, chip, base, FL_CFI_QUERY);
1508 			if (ret) {
1509 				mutex_unlock(&chip->mutex);
1510 				return ret;
1511 			}
1512 			cfi_qry_mode_on(base, map, cfi);
1513 			otp = cfi_read_query(map, base + 0x3 * ofs_factor);
1514 			cfi_qry_mode_off(base, map, cfi);
1515 			put_chip(map, chip, base);
1516 			mutex_unlock(&chip->mutex);
1517 
1518 			if (otp & 0x80) {
1519 				/* factory locked */
1520 				factory_offset = 0;
1521 				factory_size = 0x100;
1522 			} else {
1523 				/* customer lockable */
1524 				user_offset = 0;
1525 				user_size = 0x100;
1526 
1527 				mutex_lock(&chip->mutex);
1528 				ret = get_chip(map, chip, base, FL_LOCKING);
1529 				if (ret) {
1530 					mutex_unlock(&chip->mutex);
1531 					return ret;
1532 				}
1533 
1534 				/* Enter lock register command */
1535 				cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
1536 						 chip->start, map, cfi,
1537 						 cfi->device_type, NULL);
1538 				cfi_send_gen_cmd(0x55, cfi->addr_unlock2,
1539 						 chip->start, map, cfi,
1540 						 cfi->device_type, NULL);
1541 				cfi_send_gen_cmd(0x40, cfi->addr_unlock1,
1542 						 chip->start, map, cfi,
1543 						 cfi->device_type, NULL);
1544 				/* read lock register */
1545 				lockreg = cfi_read_query(map, 0);
1546 				/* exit protection commands */
1547 				map_write(map, CMD(0x90), chip->start);
1548 				map_write(map, CMD(0x00), chip->start);
1549 				put_chip(map, chip, chip->start);
1550 				mutex_unlock(&chip->mutex);
1551 
1552 				user_locked = ((lockreg & 0x01) == 0x00);
1553 			}
1554 		}
1555 
1556 		otpsize = user_regs ? user_size : factory_size;
1557 		if (!otpsize)
1558 			continue;
1559 		otpoffset = user_regs ? user_offset : factory_offset;
1560 		otplocked = user_regs ? user_locked : 1;
1561 
1562 		if (!action) {
1563 			/* return otpinfo */
1564 			struct otp_info *otpinfo;
1565 			len -= sizeof(*otpinfo);
1566 			if (len <= 0)
1567 				return -ENOSPC;
1568 			otpinfo = (struct otp_info *)buf;
1569 			otpinfo->start = from;
1570 			otpinfo->length = otpsize;
1571 			otpinfo->locked = otplocked;
1572 			buf += sizeof(*otpinfo);
1573 			*retlen += sizeof(*otpinfo);
1574 			from += otpsize;
1575 		} else if ((from < otpsize) && (len > 0)) {
1576 			size_t size;
1577 			size = (len < otpsize - from) ? len : otpsize - from;
1578 			ret = action(map, chip, otpoffset + from, size, buf,
1579 				     otpsize);
1580 			if (ret < 0)
1581 				return ret;
1582 
1583 			buf += size;
1584 			len -= size;
1585 			*retlen += size;
1586 			from = 0;
1587 		} else {
1588 			from -= otpsize;
1589 		}
1590 	}
1591 	return 0;
1592 }
1593 
1594 static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len,
1595 					 size_t *retlen, struct otp_info *buf)
1596 {
1597 	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
1598 				   NULL, 0);
1599 }
1600 
1601 static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len,
1602 					 size_t *retlen, struct otp_info *buf)
1603 {
1604 	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
1605 				   NULL, 1);
1606 }
1607 
1608 static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
1609 					 size_t len, size_t *retlen,
1610 					 u_char *buf)
1611 {
1612 	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
1613 				   buf, do_read_secsi_onechip, 0);
1614 }
1615 
1616 static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
1617 					 size_t len, size_t *retlen,
1618 					 u_char *buf)
1619 {
1620 	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
1621 				   buf, do_read_secsi_onechip, 1);
1622 }
1623 
1624 static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
1625 					  size_t len, size_t *retlen,
1626 					  u_char *buf)
1627 {
1628 	return cfi_amdstd_otp_walk(mtd, from, len, retlen, buf,
1629 				   do_otp_write, 1);
1630 }
1631 
1632 static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
1633 					 size_t len)
1634 {
1635 	size_t retlen;
1636 	return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL,
1637 				   do_otp_lock, 1);
1638 }
1639 
1640 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1641 				     unsigned long adr, map_word datum,
1642 				     int mode)
1643 {
1644 	struct cfi_private *cfi = map->fldrv_priv;
1645 	unsigned long timeo = jiffies + HZ;
1646 	/*
1647 	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
1648 	 * have a max write time of a few hundreds usec). However, we should
1649 	 * use the maximum timeout value given by the chip at probe time
1650 	 * instead.  Unfortunately, struct flchip does have a field for
1651 	 * maximum timeout, only for typical which can be far too short
1652 	 * depending of the conditions.	 The ' + 1' is to avoid having a
1653 	 * timeout of 0 jiffies if HZ is smaller than 1000.
1654 	 */
1655 	unsigned long uWriteTimeout = (HZ / 1000) + 1;
1656 	int ret = 0;
1657 	map_word oldd;
1658 	int retry_cnt = 0;
1659 
1660 	adr += chip->start;
1661 
1662 	mutex_lock(&chip->mutex);
1663 	ret = get_chip(map, chip, adr, mode);
1664 	if (ret) {
1665 		mutex_unlock(&chip->mutex);
1666 		return ret;
1667 	}
1668 
1669 	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1670 		 __func__, adr, datum.x[0]);
1671 
1672 	if (mode == FL_OTP_WRITE)
1673 		otp_enter(map, chip, adr, map_bankwidth(map));
1674 
1675 	/*
1676 	 * Check for a NOP for the case when the datum to write is already
1677 	 * present - it saves time and works around buggy chips that corrupt
1678 	 * data at other locations when 0xff is written to a location that
1679 	 * already contains 0xff.
1680 	 */
1681 	oldd = map_read(map, adr);
1682 	if (map_word_equal(map, oldd, datum)) {
1683 		pr_debug("MTD %s(): NOP\n",
1684 		       __func__);
1685 		goto op_done;
1686 	}
1687 
1688 	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1689 	ENABLE_VPP(map);
1690 	xip_disable(map, chip, adr);
1691 
1692  retry:
1693 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1694 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1695 	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1696 	map_write(map, datum, adr);
1697 	chip->state = mode;
1698 
1699 	INVALIDATE_CACHE_UDELAY(map, chip,
1700 				adr, map_bankwidth(map),
1701 				chip->word_write_time);
1702 
1703 	/* See comment above for timeout value. */
1704 	timeo = jiffies + uWriteTimeout;
1705 	for (;;) {
1706 		if (chip->state != mode) {
1707 			/* Someone's suspended the write. Sleep */
1708 			DECLARE_WAITQUEUE(wait, current);
1709 
1710 			set_current_state(TASK_UNINTERRUPTIBLE);
1711 			add_wait_queue(&chip->wq, &wait);
1712 			mutex_unlock(&chip->mutex);
1713 			schedule();
1714 			remove_wait_queue(&chip->wq, &wait);
1715 			timeo = jiffies + (HZ / 2); /* FIXME */
1716 			mutex_lock(&chip->mutex);
1717 			continue;
1718 		}
1719 
1720 		if (time_after(jiffies, timeo) &&
1721 		    !chip_ready(map, chip, adr)) {
1722 			xip_enable(map, chip, adr);
1723 			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
1724 			xip_disable(map, chip, adr);
1725 			break;
1726 		}
1727 
1728 		if (chip_ready(map, chip, adr))
1729 			break;
1730 
1731 		/* Latency issues. Drop the lock, wait a while and retry */
1732 		UDELAY(map, chip, adr, 1);
1733 	}
1734 	/* Did we succeed? */
1735 	if (!chip_good(map, chip, adr, datum)) {
1736 		/* reset on all failures. */
1737 		cfi_check_err_status(map, chip, adr);
1738 		map_write(map, CMD(0xF0), chip->start);
1739 		/* FIXME - should have reset delay before continuing */
1740 
1741 		if (++retry_cnt <= MAX_RETRIES)
1742 			goto retry;
1743 
1744 		ret = -EIO;
1745 	}
1746 	xip_enable(map, chip, adr);
1747  op_done:
1748 	if (mode == FL_OTP_WRITE)
1749 		otp_exit(map, chip, adr, map_bankwidth(map));
1750 	chip->state = FL_READY;
1751 	DISABLE_VPP(map);
1752 	put_chip(map, chip, adr);
1753 	mutex_unlock(&chip->mutex);
1754 
1755 	return ret;
1756 }
1757 
1758 
1759 static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1760 				  size_t *retlen, const u_char *buf)
1761 {
1762 	struct map_info *map = mtd->priv;
1763 	struct cfi_private *cfi = map->fldrv_priv;
1764 	int ret = 0;
1765 	int chipnum;
1766 	unsigned long ofs, chipstart;
1767 	DECLARE_WAITQUEUE(wait, current);
1768 
1769 	chipnum = to >> cfi->chipshift;
1770 	ofs = to  - (chipnum << cfi->chipshift);
1771 	chipstart = cfi->chips[chipnum].start;
1772 
1773 	/* If it's not bus-aligned, do the first byte write */
1774 	if (ofs & (map_bankwidth(map)-1)) {
1775 		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1776 		int i = ofs - bus_ofs;
1777 		int n = 0;
1778 		map_word tmp_buf;
1779 
1780  retry:
1781 		mutex_lock(&cfi->chips[chipnum].mutex);
1782 
1783 		if (cfi->chips[chipnum].state != FL_READY) {
1784 			set_current_state(TASK_UNINTERRUPTIBLE);
1785 			add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1786 
1787 			mutex_unlock(&cfi->chips[chipnum].mutex);
1788 
1789 			schedule();
1790 			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1791 			goto retry;
1792 		}
1793 
1794 		/* Load 'tmp_buf' with old contents of flash */
1795 		tmp_buf = map_read(map, bus_ofs+chipstart);
1796 
1797 		mutex_unlock(&cfi->chips[chipnum].mutex);
1798 
1799 		/* Number of bytes to copy from buffer */
1800 		n = min_t(int, len, map_bankwidth(map)-i);
1801 
1802 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1803 
1804 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1805 				       bus_ofs, tmp_buf, FL_WRITING);
1806 		if (ret)
1807 			return ret;
1808 
1809 		ofs += n;
1810 		buf += n;
1811 		(*retlen) += n;
1812 		len -= n;
1813 
1814 		if (ofs >> cfi->chipshift) {
1815 			chipnum ++;
1816 			ofs = 0;
1817 			if (chipnum == cfi->numchips)
1818 				return 0;
1819 		}
1820 	}
1821 
1822 	/* We are now aligned, write as much as possible */
1823 	while(len >= map_bankwidth(map)) {
1824 		map_word datum;
1825 
1826 		datum = map_word_load(map, buf);
1827 
1828 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1829 				       ofs, datum, FL_WRITING);
1830 		if (ret)
1831 			return ret;
1832 
1833 		ofs += map_bankwidth(map);
1834 		buf += map_bankwidth(map);
1835 		(*retlen) += map_bankwidth(map);
1836 		len -= map_bankwidth(map);
1837 
1838 		if (ofs >> cfi->chipshift) {
1839 			chipnum ++;
1840 			ofs = 0;
1841 			if (chipnum == cfi->numchips)
1842 				return 0;
1843 			chipstart = cfi->chips[chipnum].start;
1844 		}
1845 	}
1846 
1847 	/* Write the trailing bytes if any */
1848 	if (len & (map_bankwidth(map)-1)) {
1849 		map_word tmp_buf;
1850 
1851  retry1:
1852 		mutex_lock(&cfi->chips[chipnum].mutex);
1853 
1854 		if (cfi->chips[chipnum].state != FL_READY) {
1855 			set_current_state(TASK_UNINTERRUPTIBLE);
1856 			add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1857 
1858 			mutex_unlock(&cfi->chips[chipnum].mutex);
1859 
1860 			schedule();
1861 			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1862 			goto retry1;
1863 		}
1864 
1865 		tmp_buf = map_read(map, ofs + chipstart);
1866 
1867 		mutex_unlock(&cfi->chips[chipnum].mutex);
1868 
1869 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1870 
1871 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1872 				       ofs, tmp_buf, FL_WRITING);
1873 		if (ret)
1874 			return ret;
1875 
1876 		(*retlen) += len;
1877 	}
1878 
1879 	return 0;
1880 }
1881 
1882 
1883 /*
1884  * FIXME: interleaved mode not tested, and probably not supported!
1885  */
1886 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1887 				    unsigned long adr, const u_char *buf,
1888 				    int len)
1889 {
1890 	struct cfi_private *cfi = map->fldrv_priv;
1891 	unsigned long timeo = jiffies + HZ;
1892 	/*
1893 	 * Timeout is calculated according to CFI data, if available.
1894 	 * See more comments in cfi_cmdset_0002().
1895 	 */
1896 	unsigned long uWriteTimeout =
1897 				usecs_to_jiffies(chip->buffer_write_time_max);
1898 	int ret = -EIO;
1899 	unsigned long cmd_adr;
1900 	int z, words;
1901 	map_word datum;
1902 
1903 	adr += chip->start;
1904 	cmd_adr = adr;
1905 
1906 	mutex_lock(&chip->mutex);
1907 	ret = get_chip(map, chip, adr, FL_WRITING);
1908 	if (ret) {
1909 		mutex_unlock(&chip->mutex);
1910 		return ret;
1911 	}
1912 
1913 	datum = map_word_load(map, buf);
1914 
1915 	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1916 		 __func__, adr, datum.x[0]);
1917 
1918 	XIP_INVAL_CACHED_RANGE(map, adr, len);
1919 	ENABLE_VPP(map);
1920 	xip_disable(map, chip, cmd_adr);
1921 
1922 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1923 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1924 
1925 	/* Write Buffer Load */
1926 	map_write(map, CMD(0x25), cmd_adr);
1927 
1928 	chip->state = FL_WRITING_TO_BUFFER;
1929 
1930 	/* Write length of data to come */
1931 	words = len / map_bankwidth(map);
1932 	map_write(map, CMD(words - 1), cmd_adr);
1933 	/* Write data */
1934 	z = 0;
1935 	while(z < words * map_bankwidth(map)) {
1936 		datum = map_word_load(map, buf);
1937 		map_write(map, datum, adr + z);
1938 
1939 		z += map_bankwidth(map);
1940 		buf += map_bankwidth(map);
1941 	}
1942 	z -= map_bankwidth(map);
1943 
1944 	adr += z;
1945 
1946 	/* Write Buffer Program Confirm: GO GO GO */
1947 	map_write(map, CMD(0x29), cmd_adr);
1948 	chip->state = FL_WRITING;
1949 
1950 	INVALIDATE_CACHE_UDELAY(map, chip,
1951 				adr, map_bankwidth(map),
1952 				chip->word_write_time);
1953 
1954 	timeo = jiffies + uWriteTimeout;
1955 
1956 	for (;;) {
1957 		if (chip->state != FL_WRITING) {
1958 			/* Someone's suspended the write. Sleep */
1959 			DECLARE_WAITQUEUE(wait, current);
1960 
1961 			set_current_state(TASK_UNINTERRUPTIBLE);
1962 			add_wait_queue(&chip->wq, &wait);
1963 			mutex_unlock(&chip->mutex);
1964 			schedule();
1965 			remove_wait_queue(&chip->wq, &wait);
1966 			timeo = jiffies + (HZ / 2); /* FIXME */
1967 			mutex_lock(&chip->mutex);
1968 			continue;
1969 		}
1970 
1971 		/*
1972 		 * We check "time_after" and "!chip_good" before checking "chip_good" to avoid
1973 		 * the failure due to scheduling.
1974 		 */
1975 		if (time_after(jiffies, timeo) &&
1976 		    !chip_good(map, chip, adr, datum))
1977 			break;
1978 
1979 		if (chip_good(map, chip, adr, datum)) {
1980 			xip_enable(map, chip, adr);
1981 			goto op_done;
1982 		}
1983 
1984 		/* Latency issues. Drop the lock, wait a while and retry */
1985 		UDELAY(map, chip, adr, 1);
1986 	}
1987 
1988 	/*
1989 	 * Recovery from write-buffer programming failures requires
1990 	 * the write-to-buffer-reset sequence.  Since the last part
1991 	 * of the sequence also works as a normal reset, we can run
1992 	 * the same commands regardless of why we are here.
1993 	 * See e.g.
1994 	 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
1995 	 */
1996 	cfi_check_err_status(map, chip, adr);
1997 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1998 			 cfi->device_type, NULL);
1999 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2000 			 cfi->device_type, NULL);
2001 	cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
2002 			 cfi->device_type, NULL);
2003 	xip_enable(map, chip, adr);
2004 	/* FIXME - should have reset delay before continuing */
2005 
2006 	printk(KERN_WARNING "MTD %s(): software timeout, address:0x%.8lx.\n",
2007 	       __func__, adr);
2008 
2009 	ret = -EIO;
2010  op_done:
2011 	chip->state = FL_READY;
2012 	DISABLE_VPP(map);
2013 	put_chip(map, chip, adr);
2014 	mutex_unlock(&chip->mutex);
2015 
2016 	return ret;
2017 }
2018 
2019 
2020 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
2021 				    size_t *retlen, const u_char *buf)
2022 {
2023 	struct map_info *map = mtd->priv;
2024 	struct cfi_private *cfi = map->fldrv_priv;
2025 	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
2026 	int ret = 0;
2027 	int chipnum;
2028 	unsigned long ofs;
2029 
2030 	chipnum = to >> cfi->chipshift;
2031 	ofs = to  - (chipnum << cfi->chipshift);
2032 
2033 	/* If it's not bus-aligned, do the first word write */
2034 	if (ofs & (map_bankwidth(map)-1)) {
2035 		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
2036 		if (local_len > len)
2037 			local_len = len;
2038 		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
2039 					     local_len, retlen, buf);
2040 		if (ret)
2041 			return ret;
2042 		ofs += local_len;
2043 		buf += local_len;
2044 		len -= local_len;
2045 
2046 		if (ofs >> cfi->chipshift) {
2047 			chipnum ++;
2048 			ofs = 0;
2049 			if (chipnum == cfi->numchips)
2050 				return 0;
2051 		}
2052 	}
2053 
2054 	/* Write buffer is worth it only if more than one word to write... */
2055 	while (len >= map_bankwidth(map) * 2) {
2056 		/* We must not cross write block boundaries */
2057 		int size = wbufsize - (ofs & (wbufsize-1));
2058 
2059 		if (size > len)
2060 			size = len;
2061 		if (size % map_bankwidth(map))
2062 			size -= size % map_bankwidth(map);
2063 
2064 		ret = do_write_buffer(map, &cfi->chips[chipnum],
2065 				      ofs, buf, size);
2066 		if (ret)
2067 			return ret;
2068 
2069 		ofs += size;
2070 		buf += size;
2071 		(*retlen) += size;
2072 		len -= size;
2073 
2074 		if (ofs >> cfi->chipshift) {
2075 			chipnum ++;
2076 			ofs = 0;
2077 			if (chipnum == cfi->numchips)
2078 				return 0;
2079 		}
2080 	}
2081 
2082 	if (len) {
2083 		size_t retlen_dregs = 0;
2084 
2085 		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
2086 					     len, &retlen_dregs, buf);
2087 
2088 		*retlen += retlen_dregs;
2089 		return ret;
2090 	}
2091 
2092 	return 0;
2093 }
2094 
2095 /*
2096  * Wait for the flash chip to become ready to write data
2097  *
2098  * This is only called during the panic_write() path. When panic_write()
2099  * is called, the kernel is in the process of a panic, and will soon be
2100  * dead. Therefore we don't take any locks, and attempt to get access
2101  * to the chip as soon as possible.
2102  */
2103 static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
2104 				 unsigned long adr)
2105 {
2106 	struct cfi_private *cfi = map->fldrv_priv;
2107 	int retries = 10;
2108 	int i;
2109 
2110 	/*
2111 	 * If the driver thinks the chip is idle, and no toggle bits
2112 	 * are changing, then the chip is actually idle for sure.
2113 	 */
2114 	if (chip->state == FL_READY && chip_ready(map, chip, adr))
2115 		return 0;
2116 
2117 	/*
2118 	 * Try several times to reset the chip and then wait for it
2119 	 * to become idle. The upper limit of a few milliseconds of
2120 	 * delay isn't a big problem: the kernel is dying anyway. It
2121 	 * is more important to save the messages.
2122 	 */
2123 	while (retries > 0) {
2124 		const unsigned long timeo = (HZ / 1000) + 1;
2125 
2126 		/* send the reset command */
2127 		map_write(map, CMD(0xF0), chip->start);
2128 
2129 		/* wait for the chip to become ready */
2130 		for (i = 0; i < jiffies_to_usecs(timeo); i++) {
2131 			if (chip_ready(map, chip, adr))
2132 				return 0;
2133 
2134 			udelay(1);
2135 		}
2136 
2137 		retries--;
2138 	}
2139 
2140 	/* the chip never became ready */
2141 	return -EBUSY;
2142 }
2143 
2144 /*
2145  * Write out one word of data to a single flash chip during a kernel panic
2146  *
2147  * This is only called during the panic_write() path. When panic_write()
2148  * is called, the kernel is in the process of a panic, and will soon be
2149  * dead. Therefore we don't take any locks, and attempt to get access
2150  * to the chip as soon as possible.
2151  *
2152  * The implementation of this routine is intentionally similar to
2153  * do_write_oneword(), in order to ease code maintenance.
2154  */
2155 static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
2156 				  unsigned long adr, map_word datum)
2157 {
2158 	const unsigned long uWriteTimeout = (HZ / 1000) + 1;
2159 	struct cfi_private *cfi = map->fldrv_priv;
2160 	int retry_cnt = 0;
2161 	map_word oldd;
2162 	int ret = 0;
2163 	int i;
2164 
2165 	adr += chip->start;
2166 
2167 	ret = cfi_amdstd_panic_wait(map, chip, adr);
2168 	if (ret)
2169 		return ret;
2170 
2171 	pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
2172 			__func__, adr, datum.x[0]);
2173 
2174 	/*
2175 	 * Check for a NOP for the case when the datum to write is already
2176 	 * present - it saves time and works around buggy chips that corrupt
2177 	 * data at other locations when 0xff is written to a location that
2178 	 * already contains 0xff.
2179 	 */
2180 	oldd = map_read(map, adr);
2181 	if (map_word_equal(map, oldd, datum)) {
2182 		pr_debug("MTD %s(): NOP\n", __func__);
2183 		goto op_done;
2184 	}
2185 
2186 	ENABLE_VPP(map);
2187 
2188 retry:
2189 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2190 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2191 	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2192 	map_write(map, datum, adr);
2193 
2194 	for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
2195 		if (chip_ready(map, chip, adr))
2196 			break;
2197 
2198 		udelay(1);
2199 	}
2200 
2201 	if (!chip_good(map, chip, adr, datum)) {
2202 		/* reset on all failures. */
2203 		cfi_check_err_status(map, chip, adr);
2204 		map_write(map, CMD(0xF0), chip->start);
2205 		/* FIXME - should have reset delay before continuing */
2206 
2207 		if (++retry_cnt <= MAX_RETRIES)
2208 			goto retry;
2209 
2210 		ret = -EIO;
2211 	}
2212 
2213 op_done:
2214 	DISABLE_VPP(map);
2215 	return ret;
2216 }
2217 
2218 /*
2219  * Write out some data during a kernel panic
2220  *
2221  * This is used by the mtdoops driver to save the dying messages from a
2222  * kernel which has panic'd.
2223  *
2224  * This routine ignores all of the locking used throughout the rest of the
2225  * driver, in order to ensure that the data gets written out no matter what
2226  * state this driver (and the flash chip itself) was in when the kernel crashed.
2227  *
2228  * The implementation of this routine is intentionally similar to
2229  * cfi_amdstd_write_words(), in order to ease code maintenance.
2230  */
2231 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
2232 				  size_t *retlen, const u_char *buf)
2233 {
2234 	struct map_info *map = mtd->priv;
2235 	struct cfi_private *cfi = map->fldrv_priv;
2236 	unsigned long ofs, chipstart;
2237 	int ret = 0;
2238 	int chipnum;
2239 
2240 	chipnum = to >> cfi->chipshift;
2241 	ofs = to - (chipnum << cfi->chipshift);
2242 	chipstart = cfi->chips[chipnum].start;
2243 
2244 	/* If it's not bus aligned, do the first byte write */
2245 	if (ofs & (map_bankwidth(map) - 1)) {
2246 		unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
2247 		int i = ofs - bus_ofs;
2248 		int n = 0;
2249 		map_word tmp_buf;
2250 
2251 		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
2252 		if (ret)
2253 			return ret;
2254 
2255 		/* Load 'tmp_buf' with old contents of flash */
2256 		tmp_buf = map_read(map, bus_ofs + chipstart);
2257 
2258 		/* Number of bytes to copy from buffer */
2259 		n = min_t(int, len, map_bankwidth(map) - i);
2260 
2261 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
2262 
2263 		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2264 					     bus_ofs, tmp_buf);
2265 		if (ret)
2266 			return ret;
2267 
2268 		ofs += n;
2269 		buf += n;
2270 		(*retlen) += n;
2271 		len -= n;
2272 
2273 		if (ofs >> cfi->chipshift) {
2274 			chipnum++;
2275 			ofs = 0;
2276 			if (chipnum == cfi->numchips)
2277 				return 0;
2278 		}
2279 	}
2280 
2281 	/* We are now aligned, write as much as possible */
2282 	while (len >= map_bankwidth(map)) {
2283 		map_word datum;
2284 
2285 		datum = map_word_load(map, buf);
2286 
2287 		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2288 					     ofs, datum);
2289 		if (ret)
2290 			return ret;
2291 
2292 		ofs += map_bankwidth(map);
2293 		buf += map_bankwidth(map);
2294 		(*retlen) += map_bankwidth(map);
2295 		len -= map_bankwidth(map);
2296 
2297 		if (ofs >> cfi->chipshift) {
2298 			chipnum++;
2299 			ofs = 0;
2300 			if (chipnum == cfi->numchips)
2301 				return 0;
2302 
2303 			chipstart = cfi->chips[chipnum].start;
2304 		}
2305 	}
2306 
2307 	/* Write the trailing bytes if any */
2308 	if (len & (map_bankwidth(map) - 1)) {
2309 		map_word tmp_buf;
2310 
2311 		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
2312 		if (ret)
2313 			return ret;
2314 
2315 		tmp_buf = map_read(map, ofs + chipstart);
2316 
2317 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
2318 
2319 		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2320 					     ofs, tmp_buf);
2321 		if (ret)
2322 			return ret;
2323 
2324 		(*retlen) += len;
2325 	}
2326 
2327 	return 0;
2328 }
2329 
2330 
2331 /*
2332  * Handle devices with one erase region, that only implement
2333  * the chip erase command.
2334  */
2335 static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
2336 {
2337 	struct cfi_private *cfi = map->fldrv_priv;
2338 	unsigned long timeo = jiffies + HZ;
2339 	unsigned long int adr;
2340 	DECLARE_WAITQUEUE(wait, current);
2341 	int ret = 0;
2342 	int retry_cnt = 0;
2343 
2344 	adr = cfi->addr_unlock1;
2345 
2346 	mutex_lock(&chip->mutex);
2347 	ret = get_chip(map, chip, adr, FL_WRITING);
2348 	if (ret) {
2349 		mutex_unlock(&chip->mutex);
2350 		return ret;
2351 	}
2352 
2353 	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
2354 	       __func__, chip->start);
2355 
2356 	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
2357 	ENABLE_VPP(map);
2358 	xip_disable(map, chip, adr);
2359 
2360  retry:
2361 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2362 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2363 	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2364 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2365 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2366 	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2367 
2368 	chip->state = FL_ERASING;
2369 	chip->erase_suspended = 0;
2370 	chip->in_progress_block_addr = adr;
2371 	chip->in_progress_block_mask = ~(map->size - 1);
2372 
2373 	INVALIDATE_CACHE_UDELAY(map, chip,
2374 				adr, map->size,
2375 				chip->erase_time*500);
2376 
2377 	timeo = jiffies + (HZ*20);
2378 
2379 	for (;;) {
2380 		if (chip->state != FL_ERASING) {
2381 			/* Someone's suspended the erase. Sleep */
2382 			set_current_state(TASK_UNINTERRUPTIBLE);
2383 			add_wait_queue(&chip->wq, &wait);
2384 			mutex_unlock(&chip->mutex);
2385 			schedule();
2386 			remove_wait_queue(&chip->wq, &wait);
2387 			mutex_lock(&chip->mutex);
2388 			continue;
2389 		}
2390 		if (chip->erase_suspended) {
2391 			/* This erase was suspended and resumed.
2392 			   Adjust the timeout */
2393 			timeo = jiffies + (HZ*20); /* FIXME */
2394 			chip->erase_suspended = 0;
2395 		}
2396 
2397 		if (chip_good(map, chip, adr, map_word_ff(map)))
2398 			break;
2399 
2400 		if (time_after(jiffies, timeo)) {
2401 			printk(KERN_WARNING "MTD %s(): software timeout\n",
2402 			       __func__);
2403 			ret = -EIO;
2404 			break;
2405 		}
2406 
2407 		/* Latency issues. Drop the lock, wait a while and retry */
2408 		UDELAY(map, chip, adr, 1000000/HZ);
2409 	}
2410 	/* Did we succeed? */
2411 	if (ret) {
2412 		/* reset on all failures. */
2413 		cfi_check_err_status(map, chip, adr);
2414 		map_write(map, CMD(0xF0), chip->start);
2415 		/* FIXME - should have reset delay before continuing */
2416 
2417 		if (++retry_cnt <= MAX_RETRIES) {
2418 			ret = 0;
2419 			goto retry;
2420 		}
2421 	}
2422 
2423 	chip->state = FL_READY;
2424 	xip_enable(map, chip, adr);
2425 	DISABLE_VPP(map);
2426 	put_chip(map, chip, adr);
2427 	mutex_unlock(&chip->mutex);
2428 
2429 	return ret;
2430 }
2431 
2432 
2433 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
2434 {
2435 	struct cfi_private *cfi = map->fldrv_priv;
2436 	unsigned long timeo = jiffies + HZ;
2437 	DECLARE_WAITQUEUE(wait, current);
2438 	int ret = 0;
2439 	int retry_cnt = 0;
2440 
2441 	adr += chip->start;
2442 
2443 	mutex_lock(&chip->mutex);
2444 	ret = get_chip(map, chip, adr, FL_ERASING);
2445 	if (ret) {
2446 		mutex_unlock(&chip->mutex);
2447 		return ret;
2448 	}
2449 
2450 	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
2451 		 __func__, adr);
2452 
2453 	XIP_INVAL_CACHED_RANGE(map, adr, len);
2454 	ENABLE_VPP(map);
2455 	xip_disable(map, chip, adr);
2456 
2457  retry:
2458 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2459 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2460 	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2461 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2462 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2463 	map_write(map, cfi->sector_erase_cmd, adr);
2464 
2465 	chip->state = FL_ERASING;
2466 	chip->erase_suspended = 0;
2467 	chip->in_progress_block_addr = adr;
2468 	chip->in_progress_block_mask = ~(len - 1);
2469 
2470 	INVALIDATE_CACHE_UDELAY(map, chip,
2471 				adr, len,
2472 				chip->erase_time*500);
2473 
2474 	timeo = jiffies + (HZ*20);
2475 
2476 	for (;;) {
2477 		if (chip->state != FL_ERASING) {
2478 			/* Someone's suspended the erase. Sleep */
2479 			set_current_state(TASK_UNINTERRUPTIBLE);
2480 			add_wait_queue(&chip->wq, &wait);
2481 			mutex_unlock(&chip->mutex);
2482 			schedule();
2483 			remove_wait_queue(&chip->wq, &wait);
2484 			mutex_lock(&chip->mutex);
2485 			continue;
2486 		}
2487 		if (chip->erase_suspended) {
2488 			/* This erase was suspended and resumed.
2489 			   Adjust the timeout */
2490 			timeo = jiffies + (HZ*20); /* FIXME */
2491 			chip->erase_suspended = 0;
2492 		}
2493 
2494 		if (chip_good(map, chip, adr, map_word_ff(map)))
2495 			break;
2496 
2497 		if (time_after(jiffies, timeo)) {
2498 			printk(KERN_WARNING "MTD %s(): software timeout\n",
2499 			       __func__);
2500 			ret = -EIO;
2501 			break;
2502 		}
2503 
2504 		/* Latency issues. Drop the lock, wait a while and retry */
2505 		UDELAY(map, chip, adr, 1000000/HZ);
2506 	}
2507 	/* Did we succeed? */
2508 	if (ret) {
2509 		/* reset on all failures. */
2510 		cfi_check_err_status(map, chip, adr);
2511 		map_write(map, CMD(0xF0), chip->start);
2512 		/* FIXME - should have reset delay before continuing */
2513 
2514 		if (++retry_cnt <= MAX_RETRIES) {
2515 			ret = 0;
2516 			goto retry;
2517 		}
2518 	}
2519 
2520 	chip->state = FL_READY;
2521 	xip_enable(map, chip, adr);
2522 	DISABLE_VPP(map);
2523 	put_chip(map, chip, adr);
2524 	mutex_unlock(&chip->mutex);
2525 	return ret;
2526 }
2527 
2528 
2529 static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2530 {
2531 	return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
2532 				instr->len, NULL);
2533 }
2534 
2535 
2536 static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
2537 {
2538 	struct map_info *map = mtd->priv;
2539 	struct cfi_private *cfi = map->fldrv_priv;
2540 
2541 	if (instr->addr != 0)
2542 		return -EINVAL;
2543 
2544 	if (instr->len != mtd->size)
2545 		return -EINVAL;
2546 
2547 	return do_erase_chip(map, &cfi->chips[0]);
2548 }
2549 
2550 static int do_atmel_lock(struct map_info *map, struct flchip *chip,
2551 			 unsigned long adr, int len, void *thunk)
2552 {
2553 	struct cfi_private *cfi = map->fldrv_priv;
2554 	int ret;
2555 
2556 	mutex_lock(&chip->mutex);
2557 	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
2558 	if (ret)
2559 		goto out_unlock;
2560 	chip->state = FL_LOCKING;
2561 
2562 	pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
2563 
2564 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2565 			 cfi->device_type, NULL);
2566 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2567 			 cfi->device_type, NULL);
2568 	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
2569 			 cfi->device_type, NULL);
2570 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2571 			 cfi->device_type, NULL);
2572 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2573 			 cfi->device_type, NULL);
2574 	map_write(map, CMD(0x40), chip->start + adr);
2575 
2576 	chip->state = FL_READY;
2577 	put_chip(map, chip, adr + chip->start);
2578 	ret = 0;
2579 
2580 out_unlock:
2581 	mutex_unlock(&chip->mutex);
2582 	return ret;
2583 }
2584 
2585 static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
2586 			   unsigned long adr, int len, void *thunk)
2587 {
2588 	struct cfi_private *cfi = map->fldrv_priv;
2589 	int ret;
2590 
2591 	mutex_lock(&chip->mutex);
2592 	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
2593 	if (ret)
2594 		goto out_unlock;
2595 	chip->state = FL_UNLOCKING;
2596 
2597 	pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
2598 
2599 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2600 			 cfi->device_type, NULL);
2601 	map_write(map, CMD(0x70), adr);
2602 
2603 	chip->state = FL_READY;
2604 	put_chip(map, chip, adr + chip->start);
2605 	ret = 0;
2606 
2607 out_unlock:
2608 	mutex_unlock(&chip->mutex);
2609 	return ret;
2610 }
2611 
2612 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2613 {
2614 	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
2615 }
2616 
2617 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2618 {
2619 	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
2620 }
2621 
2622 /*
2623  * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
2624  */
2625 
2626 struct ppb_lock {
2627 	struct flchip *chip;
2628 	unsigned long adr;
2629 	int locked;
2630 };
2631 
2632 #define DO_XXLOCK_ONEBLOCK_LOCK		((void *)1)
2633 #define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *)2)
2634 #define DO_XXLOCK_ONEBLOCK_GETLOCK	((void *)3)
2635 
2636 static int __maybe_unused do_ppb_xxlock(struct map_info *map,
2637 					struct flchip *chip,
2638 					unsigned long adr, int len, void *thunk)
2639 {
2640 	struct cfi_private *cfi = map->fldrv_priv;
2641 	unsigned long timeo;
2642 	int ret;
2643 
2644 	adr += chip->start;
2645 	mutex_lock(&chip->mutex);
2646 	ret = get_chip(map, chip, adr, FL_LOCKING);
2647 	if (ret) {
2648 		mutex_unlock(&chip->mutex);
2649 		return ret;
2650 	}
2651 
2652 	pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);
2653 
2654 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2655 			 cfi->device_type, NULL);
2656 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2657 			 cfi->device_type, NULL);
2658 	/* PPB entry command */
2659 	cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
2660 			 cfi->device_type, NULL);
2661 
2662 	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2663 		chip->state = FL_LOCKING;
2664 		map_write(map, CMD(0xA0), adr);
2665 		map_write(map, CMD(0x00), adr);
2666 	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2667 		/*
2668 		 * Unlocking of one specific sector is not supported, so we
2669 		 * have to unlock all sectors of this device instead
2670 		 */
2671 		chip->state = FL_UNLOCKING;
2672 		map_write(map, CMD(0x80), chip->start);
2673 		map_write(map, CMD(0x30), chip->start);
2674 	} else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
2675 		chip->state = FL_JEDEC_QUERY;
2676 		/* Return locked status: 0->locked, 1->unlocked */
2677 		ret = !cfi_read_query(map, adr);
2678 	} else
2679 		BUG();
2680 
2681 	/*
2682 	 * Wait for some time as unlocking of all sectors takes quite long
2683 	 */
2684 	timeo = jiffies + msecs_to_jiffies(2000);	/* 2s max (un)locking */
2685 	for (;;) {
2686 		if (chip_ready(map, chip, adr))
2687 			break;
2688 
2689 		if (time_after(jiffies, timeo)) {
2690 			printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
2691 			ret = -EIO;
2692 			break;
2693 		}
2694 
2695 		UDELAY(map, chip, adr, 1);
2696 	}
2697 
2698 	/* Exit BC commands */
2699 	map_write(map, CMD(0x90), chip->start);
2700 	map_write(map, CMD(0x00), chip->start);
2701 
2702 	chip->state = FL_READY;
2703 	put_chip(map, chip, adr);
2704 	mutex_unlock(&chip->mutex);
2705 
2706 	return ret;
2707 }
2708 
2709 static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
2710 				       uint64_t len)
2711 {
2712 	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2713 				DO_XXLOCK_ONEBLOCK_LOCK);
2714 }
2715 
2716 static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
2717 					 uint64_t len)
2718 {
2719 	struct mtd_erase_region_info *regions = mtd->eraseregions;
2720 	struct map_info *map = mtd->priv;
2721 	struct cfi_private *cfi = map->fldrv_priv;
2722 	struct ppb_lock *sect;
2723 	unsigned long adr;
2724 	loff_t offset;
2725 	uint64_t length;
2726 	int chipnum;
2727 	int i;
2728 	int sectors;
2729 	int ret;
2730 	int max_sectors;
2731 
2732 	/*
2733 	 * PPB unlocking always unlocks all sectors of the flash chip.
2734 	 * We need to re-lock all previously locked sectors. So lets
2735 	 * first check the locking status of all sectors and save
2736 	 * it for future use.
2737 	 */
2738 	max_sectors = 0;
2739 	for (i = 0; i < mtd->numeraseregions; i++)
2740 		max_sectors += regions[i].numblocks;
2741 
2742 	sect = kcalloc(max_sectors, sizeof(struct ppb_lock), GFP_KERNEL);
2743 	if (!sect)
2744 		return -ENOMEM;
2745 
2746 	/*
2747 	 * This code to walk all sectors is a slightly modified version
2748 	 * of the cfi_varsize_frob() code.
2749 	 */
2750 	i = 0;
2751 	chipnum = 0;
2752 	adr = 0;
2753 	sectors = 0;
2754 	offset = 0;
2755 	length = mtd->size;
2756 
2757 	while (length) {
2758 		int size = regions[i].erasesize;
2759 
2760 		/*
2761 		 * Only test sectors that shall not be unlocked. The other
2762 		 * sectors shall be unlocked, so lets keep their locking
2763 		 * status at "unlocked" (locked=0) for the final re-locking.
2764 		 */
2765 		if ((offset < ofs) || (offset >= (ofs + len))) {
2766 			sect[sectors].chip = &cfi->chips[chipnum];
2767 			sect[sectors].adr = adr;
2768 			sect[sectors].locked = do_ppb_xxlock(
2769 				map, &cfi->chips[chipnum], adr, 0,
2770 				DO_XXLOCK_ONEBLOCK_GETLOCK);
2771 		}
2772 
2773 		adr += size;
2774 		offset += size;
2775 		length -= size;
2776 
2777 		if (offset == regions[i].offset + size * regions[i].numblocks)
2778 			i++;
2779 
2780 		if (adr >> cfi->chipshift) {
2781 			if (offset >= (ofs + len))
2782 				break;
2783 			adr = 0;
2784 			chipnum++;
2785 
2786 			if (chipnum >= cfi->numchips)
2787 				break;
2788 		}
2789 
2790 		sectors++;
2791 		if (sectors >= max_sectors) {
2792 			printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
2793 			       max_sectors);
2794 			kfree(sect);
2795 			return -EINVAL;
2796 		}
2797 	}
2798 
2799 	/* Now unlock the whole chip */
2800 	ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2801 			       DO_XXLOCK_ONEBLOCK_UNLOCK);
2802 	if (ret) {
2803 		kfree(sect);
2804 		return ret;
2805 	}
2806 
2807 	/*
2808 	 * PPB unlocking always unlocks all sectors of the flash chip.
2809 	 * We need to re-lock all previously locked sectors.
2810 	 */
2811 	for (i = 0; i < sectors; i++) {
2812 		if (sect[i].locked)
2813 			do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
2814 				      DO_XXLOCK_ONEBLOCK_LOCK);
2815 	}
2816 
2817 	kfree(sect);
2818 	return ret;
2819 }
2820 
2821 static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
2822 					    uint64_t len)
2823 {
2824 	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2825 				DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
2826 }
2827 
2828 static void cfi_amdstd_sync (struct mtd_info *mtd)
2829 {
2830 	struct map_info *map = mtd->priv;
2831 	struct cfi_private *cfi = map->fldrv_priv;
2832 	int i;
2833 	struct flchip *chip;
2834 	int ret = 0;
2835 	DECLARE_WAITQUEUE(wait, current);
2836 
2837 	for (i=0; !ret && i<cfi->numchips; i++) {
2838 		chip = &cfi->chips[i];
2839 
2840 	retry:
2841 		mutex_lock(&chip->mutex);
2842 
2843 		switch(chip->state) {
2844 		case FL_READY:
2845 		case FL_STATUS:
2846 		case FL_CFI_QUERY:
2847 		case FL_JEDEC_QUERY:
2848 			chip->oldstate = chip->state;
2849 			chip->state = FL_SYNCING;
2850 			/* No need to wake_up() on this state change -
2851 			 * as the whole point is that nobody can do anything
2852 			 * with the chip now anyway.
2853 			 */
2854 			/* fall through */
2855 		case FL_SYNCING:
2856 			mutex_unlock(&chip->mutex);
2857 			break;
2858 
2859 		default:
2860 			/* Not an idle state */
2861 			set_current_state(TASK_UNINTERRUPTIBLE);
2862 			add_wait_queue(&chip->wq, &wait);
2863 
2864 			mutex_unlock(&chip->mutex);
2865 
2866 			schedule();
2867 
2868 			remove_wait_queue(&chip->wq, &wait);
2869 
2870 			goto retry;
2871 		}
2872 	}
2873 
2874 	/* Unlock the chips again */
2875 
2876 	for (i--; i >=0; i--) {
2877 		chip = &cfi->chips[i];
2878 
2879 		mutex_lock(&chip->mutex);
2880 
2881 		if (chip->state == FL_SYNCING) {
2882 			chip->state = chip->oldstate;
2883 			wake_up(&chip->wq);
2884 		}
2885 		mutex_unlock(&chip->mutex);
2886 	}
2887 }
2888 
2889 
2890 static int cfi_amdstd_suspend(struct mtd_info *mtd)
2891 {
2892 	struct map_info *map = mtd->priv;
2893 	struct cfi_private *cfi = map->fldrv_priv;
2894 	int i;
2895 	struct flchip *chip;
2896 	int ret = 0;
2897 
2898 	for (i=0; !ret && i<cfi->numchips; i++) {
2899 		chip = &cfi->chips[i];
2900 
2901 		mutex_lock(&chip->mutex);
2902 
2903 		switch(chip->state) {
2904 		case FL_READY:
2905 		case FL_STATUS:
2906 		case FL_CFI_QUERY:
2907 		case FL_JEDEC_QUERY:
2908 			chip->oldstate = chip->state;
2909 			chip->state = FL_PM_SUSPENDED;
2910 			/* No need to wake_up() on this state change -
2911 			 * as the whole point is that nobody can do anything
2912 			 * with the chip now anyway.
2913 			 */
2914 		case FL_PM_SUSPENDED:
2915 			break;
2916 
2917 		default:
2918 			ret = -EAGAIN;
2919 			break;
2920 		}
2921 		mutex_unlock(&chip->mutex);
2922 	}
2923 
2924 	/* Unlock the chips again */
2925 
2926 	if (ret) {
2927 		for (i--; i >=0; i--) {
2928 			chip = &cfi->chips[i];
2929 
2930 			mutex_lock(&chip->mutex);
2931 
2932 			if (chip->state == FL_PM_SUSPENDED) {
2933 				chip->state = chip->oldstate;
2934 				wake_up(&chip->wq);
2935 			}
2936 			mutex_unlock(&chip->mutex);
2937 		}
2938 	}
2939 
2940 	return ret;
2941 }
2942 
2943 
2944 static void cfi_amdstd_resume(struct mtd_info *mtd)
2945 {
2946 	struct map_info *map = mtd->priv;
2947 	struct cfi_private *cfi = map->fldrv_priv;
2948 	int i;
2949 	struct flchip *chip;
2950 
2951 	for (i=0; i<cfi->numchips; i++) {
2952 
2953 		chip = &cfi->chips[i];
2954 
2955 		mutex_lock(&chip->mutex);
2956 
2957 		if (chip->state == FL_PM_SUSPENDED) {
2958 			chip->state = FL_READY;
2959 			map_write(map, CMD(0xF0), chip->start);
2960 			wake_up(&chip->wq);
2961 		}
2962 		else
2963 			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
2964 
2965 		mutex_unlock(&chip->mutex);
2966 	}
2967 }
2968 
2969 
2970 /*
2971  * Ensure that the flash device is put back into read array mode before
2972  * unloading the driver or rebooting.  On some systems, rebooting while
2973  * the flash is in query/program/erase mode will prevent the CPU from
2974  * fetching the bootloader code, requiring a hard reset or power cycle.
2975  */
2976 static int cfi_amdstd_reset(struct mtd_info *mtd)
2977 {
2978 	struct map_info *map = mtd->priv;
2979 	struct cfi_private *cfi = map->fldrv_priv;
2980 	int i, ret;
2981 	struct flchip *chip;
2982 
2983 	for (i = 0; i < cfi->numchips; i++) {
2984 
2985 		chip = &cfi->chips[i];
2986 
2987 		mutex_lock(&chip->mutex);
2988 
2989 		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2990 		if (!ret) {
2991 			map_write(map, CMD(0xF0), chip->start);
2992 			chip->state = FL_SHUTDOWN;
2993 			put_chip(map, chip, chip->start);
2994 		}
2995 
2996 		mutex_unlock(&chip->mutex);
2997 	}
2998 
2999 	return 0;
3000 }
3001 
3002 
3003 static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
3004 			       void *v)
3005 {
3006 	struct mtd_info *mtd;
3007 
3008 	mtd = container_of(nb, struct mtd_info, reboot_notifier);
3009 	cfi_amdstd_reset(mtd);
3010 	return NOTIFY_DONE;
3011 }
3012 
3013 
3014 static void cfi_amdstd_destroy(struct mtd_info *mtd)
3015 {
3016 	struct map_info *map = mtd->priv;
3017 	struct cfi_private *cfi = map->fldrv_priv;
3018 
3019 	cfi_amdstd_reset(mtd);
3020 	unregister_reboot_notifier(&mtd->reboot_notifier);
3021 	kfree(cfi->cmdset_priv);
3022 	kfree(cfi->cfiq);
3023 	kfree(cfi);
3024 	kfree(mtd->eraseregions);
3025 }
3026 
3027 MODULE_LICENSE("GPL");
3028 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
3029 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
3030 MODULE_ALIAS("cfi_cmdset_0006");
3031 MODULE_ALIAS("cfi_cmdset_0701");
3032