/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define MANUFACTURER_AMD	0x0001
#define MANUFACTURER_ATMEL	0x001F
#define MANUFACTURER_SST	0x00BF
#define SST49LF004B	        0x0060
#define SST49LF040B	        0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};


/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);

	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

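	/*
	 * Per the CFI spec the Vpp min/max fields are nibble-encoded:
	 * high nibble is whole volts, low nibble is tenths, so e.g. a
	 * VppMin of 0x32 prints as "3.2 V" below.
	 */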
	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version older than 1.1 (ASCII "1.1" packs to 0x3131)
		   => don't trust bootloc */
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}
	}
}
#endif

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n");
		mtd->write = cfi_amdstd_write_buffers;
	}
}

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	/* Keep the 5-byte PRI header ("PRI" plus the two version bytes),
	   clear the rest and refill it in AMD terms below */
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	if (atmel_pri.BottomBoot)
		extp->TopBottom = 2;
	else
		extp->TopBottom = 3;

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
	/* Setup for chips with a secsi area */
	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
		((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->erase = cfi_amdstd_erase_chip;
	}
}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
{
	mtd->lock = cfi_atmel_lock;
	mtd->unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
	}
}

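/*
 * cfi_fixup() (see cfi_util.c) walks a table like the ones below and calls
 * every entry whose manufacturer and device ID match the probed chip;
 * CFI_MFR_ANY and CFI_ID_ANY act as wildcards.  A minimal sketch of a new
 * quirk handler, using a made-up device ID, would be:
 */
#if 0
static void fixup_example_quirk(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* e.g. force word writes by pretending there is no write buffer */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
}
/* ...with a matching entry: { CFI_MFR_AMD, 0x1234, fixup_example_quirk, NULL }, */
#endif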
static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear to be
	 * common, and it looks like the device IDs are as well.  This
	 * table picks up all the cases where we know that to be so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
	{ 0, 0, NULL, NULL }
};


struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		unsigned char bootloc;
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
			printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
			       "version %c.%c.\n",  extp->MajorVersion,
			       extp->MinorVersion);
			kfree(extp);
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		/* Apply cfi device specific fixups */
		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		bootloc = extp->TopBottom;
		if ((bootloc != 2) && (bootloc != 3)) {
			printk(KERN_WARNING "%s: CFI does not contain boot "
			       "bank location. Assuming bottom.\n", map->name);
			bootloc = 2;
		}

		if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
			printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);

			for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
				int j = (cfi->cfiq->NumEraseRegions-1)-i;
				__u32 swap;

				swap = cfi->cfiq->EraseRegionInfo[i];
				cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
				cfi->cfiq->EraseRegionInfo[j] = swap;
			}
		}
		/* Set the default CFI lock/unlock addresses */
		cfi->addr_unlock1 = 0x555;
		cfi->addr_unlock2 = 0x2aa;
		/* Modify the unlock address if we are in compatibility mode */
		if (	/* x16 in x8 mode */
			((cfi->device_type == CFI_DEVICETYPE_X8) &&
				(cfi->cfiq->InterfaceDesc ==
					CFI_INTERFACE_X8_BY_X16_ASYNC)) ||
			/* x32 in x16 mode */
			((cfi->device_type == CFI_DEVICETYPE_X16) &&
				(cfi->cfiq->InterfaceDesc ==
					CFI_INTERFACE_X16_BY_X32_ASYNC)))
		{
			cfi->addr_unlock1 = 0xaaa;
			cfi->addr_unlock2 = 0x555;
		}
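		/*
		 * E.g. a x16 part strapped into x8 compatibility mode
		 * latches its unlock cycles on byte addresses, so word
		 * address 0x555 becomes byte address 0xaaa above.
		 */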

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

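	/*
	 * Per the CFI query spec the "typical" timeout fields are
	 * log2-encoded (2^n us for word/buffer writes, 2^n ms for
	 * block erase), hence the 1 << shifts below.
	 */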
	for (i=0; i<cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI) ? "CFI" : "JEDEC", cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}
#if 0
	// debug
	for (i=0; i<mtd->numeraseregions; i++) {
		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i, mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}
#endif

	/* FIXME: erase-suspend-program is broken.  See
	   http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
	printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if (mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by bits that keep toggling, or by bits held at the
 * wrong value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				spin_unlock(chip->mutex);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}
		/* Fall through */

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
			goto sleep;

		if (!(mode == FL_READY
		      || mode == FL_POINT
		      || !cfip
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we're trying
				 * to use the erase-in-progress sector. */
				map_write(map, CMD(0x30), chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		/* Fall through */

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}
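
/*
 * Every operation below follows the same locking convention around this
 * pair of helpers (a sketch of the pattern, not new driver logic):
 *
 *	spin_lock(chip->mutex);
 *	ret = get_chip(map, chip, adr, FL_WRITING);
 *	if (ret) {
 *		spin_unlock(chip->mutex);
 *		return ret;
 *	}
 *	... issue command cycles, poll with chip_ready()/chip_good() ...
 *	put_chip(map, chip, adr);
 *	spin_unlock(chip->mutex);
 */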


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    (chip->state == FL_ERASING && (extp->EraseSuspend & 2)) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			spin_unlock(chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0x30), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no need to
 * worry about the presence of add_wait_queue() or schedule() calls from
 * within a couple of xip_disable()'d areas of code, like in
 * do_erase_oneblock for example.  The queueing and scheduling always happen
 * within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. with the flash
 * in array mode, so many of the cases therein are never executed and XIP
 * is not disturbed.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}


static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	spin_lock(chip->mutex);

	if (chip->state != FL_READY) {
#if 0
		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if (signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

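	/* AA/55/88 is the AMD "Enter SecSi Sector Region" command sequence */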
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

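	/* AA/55/90 followed by a 0x00 write exits the SecSi region again */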
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	spin_unlock(chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	/* 8 secsi bytes per chip */
	chipnum = from >> 3;
	ofs = from & 7;

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}


static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundred usecs). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip does not have a field for
	 * the maximum timeout, only for the typical one, which can be far
	 * too short depending on the conditions.  The ' + 1' is to avoid
	 * having a timeout of 0 jiffies if HZ is smaller than 1000 (e.g.
	 * with HZ == 100, HZ / 1000 == 0, so we still get one jiffy, 10ms).
	 */
	unsigned long uWriteTimeout = (HZ / 1000) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	      __func__, adr, datum.x[0]);

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
		      __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if (signal_pending(current))
				return -EINTR;
#endif
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while (len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if (signal_pending(current))
				return -EINTR;
#endif
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
	unsigned long uWriteTimeout = (HZ / 1000) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	      __func__, adr, datum.x[0]);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	/* No 0xA0 cycle here: buffered writes use the 0x25 Write Buffer
	   Load command instead of the single-word program command */

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while (z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures. */
	map_write(map, CMD(0xF0), chip->start);
	xip_enable(map, chip, adr);
	/* FIXME - should have reset delay before continuing */

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__);

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
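	/*
	 * E.g. a single chip reporting MaxBufWriteSize == 5 has a
	 * 2^5 == 32 byte write buffer; two interleaved chips double
	 * that to 64 bytes.
	 */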
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		/* Bytes up to the next bus-width boundary */
		size_t local_len = (-ofs) & (map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}


/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	      __func__, chip->start);

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

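	/* Six-cycle AMD chip erase sequence: AA/55/80/AA/55, then 0x10 */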
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	      __func__, adr);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

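	/* Six-cycle AMD sector erase sequence: AA/55/80/AA/55, then 0x30 to the sector */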
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x30), adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}


static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}


static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	spin_unlock(chip->mutex);
	return ret;
}

static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}


static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* Fall through */
		case FL_SYNCING:
			spin_unlock(chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			spin_unlock(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}


static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* Fall through */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}


static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		spin_unlock(chip->mutex);
	}
}

static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");