1 /*
2  * Common Flash Interface support:
3  *   Intel Extended Vendor Command Set (ID 0x0001)
4  *
5  * (C) 2000 Red Hat. GPL'd
6  *
7  * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
8  *
9  *
10  * 10/10/2000	Nicolas Pitre <nico@cam.org>
11  * 	- completely revamped method functions so they are aware and
12  * 	  independent of the flash geometry (buswidth, interleave, etc.)
13  * 	- scalability vs code size is completely set at compile-time
14  * 	  (see include/linux/mtd/cfi.h for selection)
15  *	- optimized write buffer method
16  * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
17  *	- reworked lock/unlock/erase support for var size flash
18  * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 * 	- automatically unlock sectors on resume for flash that auto-locks on power-up
20  */
21 
22 #include <linux/module.h>
23 #include <linux/types.h>
24 #include <linux/kernel.h>
25 #include <linux/sched.h>
26 #include <linux/init.h>
27 #include <asm/io.h>
28 #include <asm/byteorder.h>
29 
30 #include <linux/errno.h>
31 #include <linux/slab.h>
32 #include <linux/delay.h>
33 #include <linux/interrupt.h>
34 #include <linux/reboot.h>
35 #include <linux/bitmap.h>
36 #include <linux/mtd/xip.h>
37 #include <linux/mtd/map.h>
38 #include <linux/mtd/mtd.h>
39 #include <linux/mtd/compatmac.h>
40 #include <linux/mtd/cfi.h>
41 
42 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
43 /* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
44 
/* debugging, turns off buffer write mode if set to 1 */
46 #define FORCE_WORD_WRITE 0
47 
48 #define MANUFACTURER_INTEL	0x0089
49 #define I82802AB	0x00ad
50 #define I82802AC	0x00ac
51 #define MANUFACTURER_ST         0x0020
52 #define M50LPW080       0x002F
53 #define AT49BV640D	0x02de
54 
55 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
56 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
57 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
58 static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
59 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
60 static void cfi_intelext_sync (struct mtd_info *);
61 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
62 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
63 #ifdef CONFIG_MTD_OTP
64 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
65 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
66 static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
67 static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
68 static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
69 					    struct otp_info *, size_t);
70 static int cfi_intelext_get_user_prot_info (struct mtd_info *,
71 					    struct otp_info *, size_t);
72 #endif
73 static int cfi_intelext_suspend (struct mtd_info *);
74 static void cfi_intelext_resume (struct mtd_info *);
75 static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
76 
77 static void cfi_intelext_destroy(struct mtd_info *);
78 
79 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
80 
81 static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
82 static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
83 
84 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
85 		     size_t *retlen, u_char **mtdbuf);
86 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
87 			size_t len);
88 
89 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
90 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
91 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
92 #include "fwh_lock.h"
93 
94 
95 
96 /*
97  *  *********** SETUP AND PROBE BITS  ***********
98  */
99 
100 static struct mtd_chip_driver cfi_intelext_chipdrv = {
101 	.probe		= NULL, /* Not usable directly */
102 	.destroy	= cfi_intelext_destroy,
103 	.name		= "cfi_cmdset_0001",
104 	.module		= THIS_MODULE
105 };
106 
107 /* #define DEBUG_LOCK_BITS */
108 /* #define DEBUG_CFI_FEATURES */
109 
110 #ifdef DEBUG_CFI_FEATURES
111 static void cfi_tell_features(struct cfi_pri_intelext *extp)
112 {
113 	int i;
114 	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
115 	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
116 	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
117 	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
118 	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
119 	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
120 	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
121 	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
122 	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
123 	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
124 	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
125 	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
126 	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
127 	for (i=11; i<32; i++) {
128 		if (extp->FeatureSupport & (1<<i))
129 			printk("     - Unknown Bit %X:      supported\n", i);
130 	}
131 
132 	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
133 	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
134 	for (i=1; i<8; i++) {
135 		if (extp->SuspendCmdSupport & (1<<i))
136 			printk("     - Unknown Bit %X:               supported\n", i);
137 	}
138 
139 	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
140 	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
141 	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<4; i++) {
143 		if (extp->BlkStatusRegMask & (1<<i))
144 			printk("     - Unknown Bit %X Active: yes\n",i);
145 	}
146 	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
147 	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
148 	for (i=6; i<16; i++) {
149 		if (extp->BlkStatusRegMask & (1<<i))
150 			printk("     - Unknown Bit %X Active: yes\n",i);
151 	}
152 
153 	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
154 	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
155 	if (extp->VppOptimal)
156 		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
157 		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
158 }
159 #endif
160 
161 /* Atmel chips don't use the same PRI format as Intel chips */
162 static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
163 {
164 	struct map_info *map = mtd->priv;
165 	struct cfi_private *cfi = map->fldrv_priv;
166 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
167 	struct cfi_pri_atmel atmel_pri;
168 	uint32_t features = 0;
169 
	/* Reverse the byteswapping: the generic probe has already
	   converted these fields to CPU endianness, so put them back
	   into little-endian order before overlaying the raw Atmel
	   PRI structure on top of them. */
171 	extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
172 	extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
173 	extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
174 
175 	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	/* wipe everything past the 5-byte "PRI" signature + version header */
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);
177 
	printk(KERN_ERR "Atmel Features: %02x\n", atmel_pri.Features);
179 
180 	if (atmel_pri.Features & 0x01) /* chip erase supported */
181 		features |= (1<<0);
182 	if (atmel_pri.Features & 0x02) /* erase suspend supported */
183 		features |= (1<<1);
184 	if (atmel_pri.Features & 0x04) /* program suspend supported */
185 		features |= (1<<2);
186 	if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
187 		features |= (1<<9);
188 	if (atmel_pri.Features & 0x20) /* page mode read supported */
189 		features |= (1<<7);
190 	if (atmel_pri.Features & 0x40) /* queued erase supported */
191 		features |= (1<<4);
192 	if (atmel_pri.Features & 0x80) /* Protection bits supported */
193 		features |= (1<<6);
194 
195 	extp->FeatureSupport = features;
196 
197 	/* burst write mode not supported */
198 	cfi->cfiq->BufWriteTimeoutTyp = 0;
199 	cfi->cfiq->BufWriteTimeoutMax = 0;
200 }
201 
202 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
204 static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
205 {
206 	struct map_info *map = mtd->priv;
207 	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
209 
210 	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
211 	                    "erase on write disabled.\n");
212 	extp->SuspendCmdSupport &= ~1;
213 }
214 #endif
215 
216 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
217 static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
218 {
219 	struct map_info *map = mtd->priv;
220 	struct cfi_private *cfi = map->fldrv_priv;
221 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
222 
223 	if (cfip && (cfip->FeatureSupport&4)) {
224 		cfip->FeatureSupport &= ~4;
225 		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
226 	}
227 }
228 #endif
229 
230 static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
231 {
232 	struct map_info *map = mtd->priv;
233 	struct cfi_private *cfi = map->fldrv_priv;
234 
235 	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
236 	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
237 }
238 
239 static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
240 {
241 	struct map_info *map = mtd->priv;
242 	struct cfi_private *cfi = map->fldrv_priv;
243 
244 	/* Note this is done after the region info is endian swapped */
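	/* The low 16 bits of EraseRegionInfo encode (number of erase
	   blocks - 1), so 0x3e sets this region to 63 blocks. */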
245 	cfi->cfiq->EraseRegionInfo[1] =
246 		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}
248 
249 static void fixup_use_point(struct mtd_info *mtd, void *param)
250 {
251 	struct map_info *map = mtd->priv;
252 	if (!mtd->point && map_is_linear(map)) {
253 		mtd->point   = cfi_intelext_point;
254 		mtd->unpoint = cfi_intelext_unpoint;
255 	}
256 }
257 
258 static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
259 {
260 	struct map_info *map = mtd->priv;
261 	struct cfi_private *cfi = map->fldrv_priv;
262 	if (cfi->cfiq->BufWriteTimeoutTyp) {
263 		printk(KERN_INFO "Using buffer write method\n" );
264 		mtd->write = cfi_intelext_write_buffers;
265 		mtd->writev = cfi_intelext_writev;
266 	}
267 }
268 
269 /*
270  * Some chips power-up with all sectors locked by default.
271  */
272 static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
273 {
274 	struct map_info *map = mtd->priv;
275 	struct cfi_private *cfi = map->fldrv_priv;
276 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
277 
278 	if (cfip->FeatureSupport&32) {
279 		printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
280 		mtd->flags |= MTD_POWERUP_LOCK;
281 	}
282 }
283 
284 static struct cfi_fixup cfi_fixup_table[] = {
285 	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
286 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
287 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
288 #endif
289 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
290 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
291 #endif
292 #if !FORCE_WORD_WRITE
293 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
294 #endif
295 	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
296 	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
297 	{ MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
298 	{ 0, 0, NULL, NULL }
299 };
300 
301 static struct cfi_fixup jedec_fixup_table[] = {
302 	{ MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
303 	{ MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
304 	{ MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
305 	{ 0, 0, NULL, NULL }
306 };
307 static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common, and it is likely that the device IDs
	 * are as well.  This table picks up all cases where
	 * we know that to be so.
	 */
313 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
314 	{ 0, 0, NULL, NULL }
315 };
316 
317 static inline struct cfi_pri_intelext *
318 read_pri_intelext(struct map_info *map, __u16 adr)
319 {
320 	struct cfi_pri_intelext *extp;
321 	unsigned int extp_size = sizeof(*extp);
322 
323  again:
324 	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
325 	if (!extp)
326 		return NULL;
327 
328 	if (extp->MajorVersion != '1' ||
329 	    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
330 		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
331 		       "version %c.%c.\n",  extp->MajorVersion,
332 		       extp->MinorVersion);
333 		kfree(extp);
334 		return NULL;
335 	}
336 
337 	/* Do some byteswapping if necessary */
338 	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
339 	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
340 	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
341 
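	/*
	 * From version 1.3 on, the fixed fields are followed by a
	 * variable-length tail in extp->extra[]:
	 *   - additional OTP protection register descriptors,
	 *   - burst read info (a length byte plus data),
	 *   - the number of hardware partition regions,
	 *   - a sizeof(partregion) field (1.4 only),
	 *   - one cfi_intelext_regioninfo per region, each with a
	 *     variable number of cfi_intelext_blockinfo entries,
	 *   - programming region info (1.4 only).
	 * The code below walks this tail to find its total size and
	 * re-reads the whole structure if the first read was short.
	 */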
342 	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
343 		unsigned int extra_size = 0;
344 		int nb_parts, i;
345 
346 		/* Protection Register info */
347 		extra_size += (extp->NumProtectionFields - 1) *
348 			      sizeof(struct cfi_intelext_otpinfo);
349 
350 		/* Burst Read info */
351 		extra_size += 2;
352 		if (extp_size < sizeof(*extp) + extra_size)
353 			goto need_more;
354 		extra_size += extp->extra[extra_size-1];
355 
356 		/* Number of hardware-partitions */
357 		extra_size += 1;
358 		if (extp_size < sizeof(*extp) + extra_size)
359 			goto need_more;
360 		nb_parts = extp->extra[extra_size - 1];
361 
362 		/* skip the sizeof(partregion) field in CFI 1.4 */
363 		if (extp->MinorVersion >= '4')
364 			extra_size += 2;
365 
366 		for (i = 0; i < nb_parts; i++) {
367 			struct cfi_intelext_regioninfo *rinfo;
368 			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
369 			extra_size += sizeof(*rinfo);
370 			if (extp_size < sizeof(*extp) + extra_size)
371 				goto need_more;
372 			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
373 			extra_size += (rinfo->NumBlockTypes - 1)
374 				      * sizeof(struct cfi_intelext_blockinfo);
375 		}
376 
377 		if (extp->MinorVersion >= '4')
378 			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);
379 
380 		if (extp_size < sizeof(*extp) + extra_size) {
381 			need_more:
382 			extp_size = sizeof(*extp) + extra_size;
383 			kfree(extp);
384 			if (extp_size > 4096) {
385 				printk(KERN_ERR
386 					"%s: cfi_pri_intelext is too fat\n",
387 					__FUNCTION__);
388 				return NULL;
389 			}
390 			goto again;
391 		}
392 	}
393 
394 	return extp;
395 }
396 
397 struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
398 {
399 	struct cfi_private *cfi = map->fldrv_priv;
400 	struct mtd_info *mtd;
401 	int i;
402 
403 	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
404 	if (!mtd) {
405 		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
406 		return NULL;
407 	}
408 	mtd->priv = map;
409 	mtd->type = MTD_NORFLASH;
410 
411 	/* Fill in the default mtd operations */
412 	mtd->erase   = cfi_intelext_erase_varsize;
413 	mtd->read    = cfi_intelext_read;
414 	mtd->write   = cfi_intelext_write_words;
415 	mtd->sync    = cfi_intelext_sync;
416 	mtd->lock    = cfi_intelext_lock;
417 	mtd->unlock  = cfi_intelext_unlock;
418 	mtd->suspend = cfi_intelext_suspend;
419 	mtd->resume  = cfi_intelext_resume;
420 	mtd->flags   = MTD_CAP_NORFLASH;
421 	mtd->name    = map->name;
422 	mtd->writesize = 1;
423 
424 	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
425 
426 	if (cfi->cfi_mode == CFI_MODE_CFI) {
427 		/*
428 		 * It's a real CFI chip, not one for which the probe
429 		 * routine faked a CFI structure. So we read the feature
430 		 * table from it.
431 		 */
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
433 		struct cfi_pri_intelext *extp;
434 
435 		extp = read_pri_intelext(map, adr);
436 		if (!extp) {
437 			kfree(mtd);
438 			return NULL;
439 		}
440 
441 		/* Install our own private info structure */
442 		cfi->cmdset_priv = extp;
443 
444 		cfi_fixup(mtd, cfi_fixup_table);
445 
446 #ifdef DEBUG_CFI_FEATURES
447 		/* Tell the user about it in lots of lovely detail */
448 		cfi_tell_features(extp);
449 #endif
450 
		if (extp->SuspendCmdSupport & 1) {
452 			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
453 		}
454 	}
455 	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
456 		/* Apply jedec specific fixups */
457 		cfi_fixup(mtd, jedec_fixup_table);
458 	}
459 	/* Apply generic fixups */
460 	cfi_fixup(mtd, fixup_table);
461 
462 	for (i=0; i< cfi->numchips; i++) {
463 		if (cfi->cfiq->WordWriteTimeoutTyp)
464 			cfi->chips[i].word_write_time =
465 				1<<cfi->cfiq->WordWriteTimeoutTyp;
466 		else
467 			cfi->chips[i].word_write_time = 50000;
468 
469 		if (cfi->cfiq->BufWriteTimeoutTyp)
470 			cfi->chips[i].buffer_write_time =
471 				1<<cfi->cfiq->BufWriteTimeoutTyp;
472 		/* No default; if it isn't specified, we won't use it */
473 
474 		if (cfi->cfiq->BlockEraseTimeoutTyp)
475 			cfi->chips[i].erase_time =
476 				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
477 		else
478 			cfi->chips[i].erase_time = 2000000;
479 
480 		cfi->chips[i].ref_point_counter = 0;
481 		init_waitqueue_head(&(cfi->chips[i].wq));
482 	}
483 
484 	map->fldrv = &cfi_intelext_chipdrv;
485 
486 	return cfi_intelext_setup(mtd);
487 }
488 struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
489 struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
490 EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
491 EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
492 EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
493 
494 static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
495 {
496 	struct map_info *map = mtd->priv;
497 	struct cfi_private *cfi = map->fldrv_priv;
498 	unsigned long offset = 0;
499 	int i,j;
500 	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
501 
502 	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
503 
504 	mtd->size = devsize * cfi->numchips;
505 
506 	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
507 	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
508 			* mtd->numeraseregions, GFP_KERNEL);
509 	if (!mtd->eraseregions) {
510 		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
511 		goto setup_err;
512 	}
513 
514 	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
515 		unsigned long ernum, ersize;
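		/* Per CFI, EraseRegionInfo packs the block size in units
		   of 256 bytes in the high 16 bits and (number of blocks
		   - 1) in the low 16 bits. */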
516 		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
517 		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
518 
519 		if (mtd->erasesize < ersize) {
520 			mtd->erasesize = ersize;
521 		}
522 		for (j=0; j<cfi->numchips; j++) {
523 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
524 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
525 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
526 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
527 		}
528 		offset += (ersize * ernum);
529 	}
530 
531 	if (offset != devsize) {
532 		/* Argh */
533 		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
534 		goto setup_err;
535 	}
536 
537 	for (i=0; i<mtd->numeraseregions;i++){
538 		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
539 		       i,mtd->eraseregions[i].offset,
540 		       mtd->eraseregions[i].erasesize,
541 		       mtd->eraseregions[i].numblocks);
542 	}
543 
544 #ifdef CONFIG_MTD_OTP
545 	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
546 	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
547 	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
548 	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
549 	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
550 	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
551 #endif
552 
553 	/* This function has the potential to distort the reality
554 	   a bit and therefore should be called last. */
555 	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
556 		goto setup_err;
557 
558 	__module_get(THIS_MODULE);
559 	register_reboot_notifier(&mtd->reboot_notifier);
560 	return mtd;
561 
562  setup_err:
	if (mtd) {
564 		kfree(mtd->eraseregions);
565 		kfree(mtd);
566 	}
567 	kfree(cfi->cmdset_priv);
568 	return NULL;
569 }
570 
571 static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
572 					struct cfi_private **pcfi)
573 {
574 	struct map_info *map = mtd->priv;
575 	struct cfi_private *cfi = *pcfi;
576 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
577 
578 	/*
579 	 * Probing of multi-partition flash chips.
580 	 *
581 	 * To support multiple partitions when available, we simply arrange
582 	 * for each of them to have their own flchip structure even if they
583 	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
585 	 * layering violation, but this is still the least intrusive
586 	 * arrangement at this point. This can be rearranged in the future
587 	 * if someone feels motivated enough.  --nico
588 	 */
589 	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
590 	    && extp->FeatureSupport & (1 << 9)) {
591 		struct cfi_private *newcfi;
592 		struct flchip *chip;
593 		struct flchip_shared *shared;
594 		int offs, numregions, numparts, partshift, numvirtchips, i, j;
595 
596 		/* Protection Register info */
597 		offs = (extp->NumProtectionFields - 1) *
598 		       sizeof(struct cfi_intelext_otpinfo);
599 
600 		/* Burst Read info */
601 		offs += extp->extra[offs+1]+2;
602 
603 		/* Number of partition regions */
604 		numregions = extp->extra[offs];
605 		offs += 1;
606 
607 		/* skip the sizeof(partregion) field in CFI 1.4 */
608 		if (extp->MinorVersion >= '4')
609 			offs += 2;
610 
611 		/* Number of hardware partitions */
612 		numparts = 0;
613 		for (i = 0; i < numregions; i++) {
614 			struct cfi_intelext_regioninfo *rinfo;
615 			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
616 			numparts += rinfo->NumIdentPartitions;
617 			offs += sizeof(*rinfo)
618 				+ (rinfo->NumBlockTypes - 1) *
619 				  sizeof(struct cfi_intelext_blockinfo);
620 		}
621 
622 		/* Programming Region info */
623 		if (extp->MinorVersion >= '4') {
624 			struct cfi_intelext_programming_regioninfo *prinfo;
625 			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
626 			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
627 			mtd->flags &= ~MTD_BIT_WRITEABLE;
628 			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
629 			       map->name, mtd->writesize,
630 			       cfi->interleave * prinfo->ControlValid,
631 			       cfi->interleave * prinfo->ControlInvalid);
632 		}
633 
634 		/*
635 		 * All functions below currently rely on all chips having
636 		 * the same geometry so we'll just assume that all hardware
637 		 * partitions are of the same size too.
638 		 */
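		/* For example: a 64MiB chip (chipshift 26) with 4 hardware
		   partitions gives partshift 24, i.e. 16MiB virtual chips.
		   (Illustrative numbers only.) */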
639 		partshift = cfi->chipshift - __ffs(numparts);
640 
641 		if ((1 << partshift) < mtd->erasesize) {
642 			printk( KERN_ERR
643 				"%s: bad number of hw partitions (%d)\n",
644 				__FUNCTION__, numparts);
645 			return -EINVAL;
646 		}
647 
648 		numvirtchips = cfi->numchips * numparts;
649 		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
650 		if (!newcfi)
651 			return -ENOMEM;
652 		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
653 		if (!shared) {
654 			kfree(newcfi);
655 			return -ENOMEM;
656 		}
657 		memcpy(newcfi, cfi, sizeof(struct cfi_private));
658 		newcfi->numchips = numvirtchips;
659 		newcfi->chipshift = partshift;
660 
661 		chip = &newcfi->chips[0];
662 		for (i = 0; i < cfi->numchips; i++) {
663 			shared[i].writing = shared[i].erasing = NULL;
664 			spin_lock_init(&shared[i].lock);
665 			for (j = 0; j < numparts; j++) {
666 				*chip = cfi->chips[i];
667 				chip->start += j << partshift;
668 				chip->priv = &shared[i];
669 				/* those should be reset too since
670 				   they create memory references. */
671 				init_waitqueue_head(&chip->wq);
672 				spin_lock_init(&chip->_spinlock);
673 				chip->mutex = &chip->_spinlock;
674 				chip++;
675 			}
676 		}
677 
678 		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
679 				  "--> %d partitions of %d KiB\n",
680 				  map->name, cfi->numchips, cfi->interleave,
681 				  newcfi->numchips, 1<<(newcfi->chipshift-10));
682 
683 		map->fldrv_priv = newcfi;
684 		*pcfi = newcfi;
685 		kfree(cfi);
686 	}
687 
688 	return 0;
689 }
690 
691 /*
692  *  *********** CHIP ACCESS FUNCTIONS ***********
693  */
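/*
 * Status register bits, as used throughout this file (taken from
 * Intel StrataFlash datasheets; other cmdset 0001 parts should
 * behave the same way):
 *   SR.7 (0x80)  write state machine ready
 *   SR.6 (0x40)  erase suspended
 *   SR.5 (0x20)  erase/block lock error
 *   SR.4 (0x10)  program error
 *   SR.3 (0x08)  VPP low
 *   SR.2 (0x04)  program suspended
 *   SR.1 (0x02)  block locked
 *   SR.0 (0x01)  apparently partition write status on
 *                multi-partition chips (see status_PWS below)
 * status_OK below is SR.7, and the 0x1a error mask checked after
 * writes is SR.4 | SR.3 | SR.1.
 */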
694 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
695 {
696 	DECLARE_WAITQUEUE(wait, current);
697 	struct cfi_private *cfi = map->fldrv_priv;
698 	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
699 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
700 	unsigned long timeo = jiffies + HZ;
701 
702 	switch (chip->state) {
703 
704 	case FL_STATUS:
705 		for (;;) {
706 			status = map_read(map, adr);
707 			if (map_word_andequal(map, status, status_OK, status_OK))
708 				break;
709 
710 			/* At this point we're fine with write operations
711 			   in other partitions as they don't conflict. */
712 			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
713 				break;
714 
715 			spin_unlock(chip->mutex);
716 			cfi_udelay(1);
717 			spin_lock(chip->mutex);
718 			/* Someone else might have been playing with it. */
719 			return -EAGAIN;
720 		}
721 		/* Fall through */
722 	case FL_READY:
723 	case FL_CFI_QUERY:
724 	case FL_JEDEC_QUERY:
725 		return 0;
726 
727 	case FL_ERASING:
728 		if (!cfip ||
729 		    !(cfip->FeatureSupport & 2) ||
730 		    !(mode == FL_READY || mode == FL_POINT ||
731 		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
732 			goto sleep;
733 
734 
735 		/* Erase suspend */
736 		map_write(map, CMD(0xB0), adr);
737 
738 		/* If the flash has finished erasing, then 'erase suspend'
739 		 * appears to make some (28F320) flash devices switch to
740 		 * 'read' mode.  Make sure that we switch to 'read status'
741 		 * mode so we get the right data. --rmk
742 		 */
743 		map_write(map, CMD(0x70), adr);
744 		chip->oldstate = FL_ERASING;
745 		chip->state = FL_ERASE_SUSPENDING;
746 		chip->erase_suspended = 1;
747 		for (;;) {
748 			status = map_read(map, adr);
749 			if (map_word_andequal(map, status, status_OK, status_OK))
750 			        break;
751 
752 			if (time_after(jiffies, timeo)) {
753 				/* Urgh. Resume and pretend we weren't here.  */
754 				map_write(map, CMD(0xd0), adr);
755 				/* Make sure we're in 'read status' mode if it had finished */
756 				map_write(map, CMD(0x70), adr);
757 				chip->state = FL_ERASING;
758 				chip->oldstate = FL_READY;
759 				printk(KERN_ERR "%s: Chip not ready after erase "
760 				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
761 				return -EIO;
762 			}
763 
764 			spin_unlock(chip->mutex);
765 			cfi_udelay(1);
766 			spin_lock(chip->mutex);
767 			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
768 			   So we can just loop here. */
769 		}
770 		chip->state = FL_STATUS;
771 		return 0;
772 
773 	case FL_XIP_WHILE_ERASING:
774 		if (mode != FL_READY && mode != FL_POINT &&
775 		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
776 			goto sleep;
777 		chip->oldstate = chip->state;
778 		chip->state = FL_READY;
779 		return 0;
780 
781 	case FL_SHUTDOWN:
		/* The machine is rebooting now, so no one can access the chip anymore */
783 		return -EIO;
784 	case FL_POINT:
785 		/* Only if there's no operation suspended... */
786 		if (mode == FL_READY && chip->oldstate == FL_READY)
787 			return 0;
788 		/* Fall through */
789 	default:
790 	sleep:
791 		set_current_state(TASK_UNINTERRUPTIBLE);
792 		add_wait_queue(&chip->wq, &wait);
793 		spin_unlock(chip->mutex);
794 		schedule();
795 		remove_wait_queue(&chip->wq, &wait);
796 		spin_lock(chip->mutex);
797 		return -EAGAIN;
798 	}
799 }
800 
801 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
802 {
803 	int ret;
804 	DECLARE_WAITQUEUE(wait, current);
805 
806  retry:
807 	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING
808 			   || mode == FL_OTP_WRITE || mode == FL_SHUTDOWN)) {
809 		/*
810 		 * OK. We have possibility for contention on the write/erase
811 		 * operations which are global to the real chip and not per
812 		 * partition.  So let's fight it over in the partition which
813 		 * currently has authority on the operation.
814 		 *
815 		 * The rules are as follows:
816 		 *
817 		 * - any write operation must own shared->writing.
818 		 *
819 		 * - any erase operation must own _both_ shared->writing and
820 		 *   shared->erasing.
821 		 *
822 		 * - contention arbitration is handled in the owner's context.
823 		 *
824 		 * The 'shared' struct can be read and/or written only when
825 		 * its lock is taken.
826 		 */
827 		struct flchip_shared *shared = chip->priv;
828 		struct flchip *contender;
829 		spin_lock(&shared->lock);
830 		contender = shared->writing;
831 		if (contender && contender != chip) {
832 			/*
833 			 * The engine to perform desired operation on this
834 			 * partition is already in use by someone else.
835 			 * Let's fight over it in the context of the chip
836 			 * currently using it.  If it is possible to suspend,
837 			 * that other partition will do just that, otherwise
838 			 * it'll happily send us to sleep.  In any case, when
839 			 * get_chip returns success we're clear to go ahead.
840 			 */
841 			ret = spin_trylock(contender->mutex);
842 			spin_unlock(&shared->lock);
843 			if (!ret)
844 				goto retry;
845 			spin_unlock(chip->mutex);
846 			ret = chip_ready(map, contender, contender->start, mode);
847 			spin_lock(chip->mutex);
848 
849 			if (ret == -EAGAIN) {
850 				spin_unlock(contender->mutex);
851 				goto retry;
852 			}
853 			if (ret) {
854 				spin_unlock(contender->mutex);
855 				return ret;
856 			}
857 			spin_lock(&shared->lock);
858 			spin_unlock(contender->mutex);
859 		}
860 
861 		/* Check if we already have suspended erase
862 		 * on this chip. Sleep. */
863 		if (mode == FL_ERASING && shared->erasing
864 		    && shared->erasing->oldstate == FL_ERASING) {
865 			spin_unlock(&shared->lock);
866 			set_current_state(TASK_UNINTERRUPTIBLE);
867 			add_wait_queue(&chip->wq, &wait);
868 			spin_unlock(chip->mutex);
869 			schedule();
870 			remove_wait_queue(&chip->wq, &wait);
871 			spin_lock(chip->mutex);
872 			goto retry;
873 		}
874 
875 		/* We now own it */
876 		shared->writing = chip;
877 		if (mode == FL_ERASING)
878 			shared->erasing = chip;
879 		spin_unlock(&shared->lock);
880 	}
881 	ret = chip_ready(map, chip, adr, mode);
882 	if (ret == -EAGAIN)
883 		goto retry;
884 
885 	return ret;
886 }
887 
888 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
889 {
890 	struct cfi_private *cfi = map->fldrv_priv;
891 
892 	if (chip->priv) {
893 		struct flchip_shared *shared = chip->priv;
894 		spin_lock(&shared->lock);
895 		if (shared->writing == chip && chip->oldstate == FL_READY) {
896 			/* We own the ability to write, but we're done */
897 			shared->writing = shared->erasing;
898 			if (shared->writing && shared->writing != chip) {
899 				/* give back ownership to who we loaned it from */
900 				struct flchip *loaner = shared->writing;
901 				spin_lock(loaner->mutex);
902 				spin_unlock(&shared->lock);
903 				spin_unlock(chip->mutex);
904 				put_chip(map, loaner, loaner->start);
905 				spin_lock(chip->mutex);
906 				spin_unlock(loaner->mutex);
907 				wake_up(&chip->wq);
908 				return;
909 			}
910 			shared->erasing = NULL;
911 			shared->writing = NULL;
912 		} else if (shared->erasing == chip && shared->writing != chip) {
913 			/*
914 			 * We own the ability to erase without the ability
915 			 * to write, which means the erase was suspended
916 			 * and some other partition is currently writing.
917 			 * Don't let the switch below mess things up since
918 			 * we don't have ownership to resume anything.
919 			 */
920 			spin_unlock(&shared->lock);
921 			wake_up(&chip->wq);
922 			return;
923 		}
924 		spin_unlock(&shared->lock);
925 	}
926 
927 	switch(chip->oldstate) {
928 	case FL_ERASING:
929 		chip->state = chip->oldstate;
930 		/* What if one interleaved chip has finished and the
931 		   other hasn't? The old code would leave the finished
932 		   one in READY mode. That's bad, and caused -EROFS
933 		   errors to be returned from do_erase_oneblock because
934 		   that's the only bit it checked for at the time.
935 		   As the state machine appears to explicitly allow
936 		   sending the 0x70 (Read Status) command to an erasing
937 		   chip and expecting it to be ignored, that's what we
938 		   do. */
939 		map_write(map, CMD(0xd0), adr);
940 		map_write(map, CMD(0x70), adr);
941 		chip->oldstate = FL_READY;
942 		chip->state = FL_ERASING;
943 		break;
944 
945 	case FL_XIP_WHILE_ERASING:
946 		chip->state = chip->oldstate;
947 		chip->oldstate = FL_READY;
948 		break;
949 
950 	case FL_READY:
951 	case FL_STATUS:
952 	case FL_JEDEC_QUERY:
953 		/* We should really make set_vpp() count, rather than doing this */
954 		DISABLE_VPP(map);
955 		break;
956 	default:
957 		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
958 	}
959 	wake_up(&chip->wq);
960 }
961 
962 #ifdef CONFIG_MTD_XIP
963 
964 /*
 * No interrupt whatsoever can be serviced while the flash isn't in array
966  * mode.  This is ensured by the xip_disable() and xip_enable() functions
967  * enclosing any code path where the flash is known not to be in array mode.
968  * And within a XIP disabled code path, only functions marked with __xipram
969  * may be called and nothing else (it's a good thing to inspect generated
970  * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
973  */
974 
975 static void xip_disable(struct map_info *map, struct flchip *chip,
976 			unsigned long adr)
977 {
978 	/* TODO: chips with no XIP use should ignore and return */
979 	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
980 	local_irq_disable();
981 }
982 
983 static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
984 				unsigned long adr)
985 {
986 	struct cfi_private *cfi = map->fldrv_priv;
987 	if (chip->state != FL_POINT && chip->state != FL_READY) {
988 		map_write(map, CMD(0xff), adr);
989 		chip->state = FL_READY;
990 	}
991 	(void) map_read(map, adr);
992 	xip_iprefetch();
993 	local_irq_enable();
994 }
995 
996 /*
997  * When a delay is required for the flash operation to complete, the
998  * xip_wait_for_operation() function is polling for both the given timeout
999  * and pending (but still masked) hardware interrupts.  Whenever there is an
1000  * interrupt pending then the flash erase or write operation is suspended,
1001  * array mode restored and interrupts unmasked.  Task scheduling might also
1002  * happen at that point.  The CPU eventually returns from the interrupt or
1003  * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
1005  *
1006  * Warning: this function _will_ fool interrupt latency tracing tools.
1007  */
1008 
1009 static int __xipram xip_wait_for_operation(
1010 		struct map_info *map, struct flchip *chip,
		unsigned long adr, unsigned int chip_op_time)
1012 {
1013 	struct cfi_private *cfi = map->fldrv_priv;
1014 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
1015 	map_word status, OK = CMD(0x80);
1016 	unsigned long usec, suspended, start, done;
1017 	flstate_t oldstate, newstate;
1018 
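	/* Allow the operation up to 8 times its typical duration before
	   timing out; fall back to 500ms when the typical duration is
	   unknown (chip_op_time == 0), mirroring the non-XIP path. */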
	start = xip_currtime();
1020 	usec = chip_op_time * 8;
1021 	if (usec == 0)
1022 		usec = 500000;
1023 	done = 0;
1024 
1025 	do {
1026 		cpu_relax();
1027 		if (xip_irqpending() && cfip &&
1028 		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
1029 		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
1030 		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
1031 			/*
1032 			 * Let's suspend the erase or write operation when
1033 			 * supported.  Note that we currently don't try to
1034 			 * suspend interleaved chips if there is already
1035 			 * another operation suspended (imagine what happens
1036 			 * when one chip was already done with the current
1037 			 * operation while another chip suspended it, then
1038 			 * we resume the whole thing at once).  Yes, it
1039 			 * can happen!
1040 			 */
1041 			usec -= done;
1042 			map_write(map, CMD(0xb0), adr);
1043 			map_write(map, CMD(0x70), adr);
1044 			suspended = xip_currtime();
1045 			do {
1046 				if (xip_elapsed_since(suspended) > 100000) {
1047 					/*
1048 					 * The chip doesn't want to suspend
1049 					 * after waiting for 100 msecs.
1050 					 * This is a critical error but there
1051 					 * is not much we can do here.
1052 					 */
1053 					return -EIO;
1054 				}
1055 				status = map_read(map, adr);
1056 			} while (!map_word_andequal(map, status, OK, OK));
1057 
1058 			/* Suspend succeeded */
1059 			oldstate = chip->state;
1060 			if (oldstate == FL_ERASING) {
1061 				if (!map_word_bitsset(map, status, CMD(0x40)))
1062 					break;
1063 				newstate = FL_XIP_WHILE_ERASING;
1064 				chip->erase_suspended = 1;
1065 			} else {
1066 				if (!map_word_bitsset(map, status, CMD(0x04)))
1067 					break;
1068 				newstate = FL_XIP_WHILE_WRITING;
1069 				chip->write_suspended = 1;
1070 			}
1071 			chip->state = newstate;
1072 			map_write(map, CMD(0xff), adr);
1073 			(void) map_read(map, adr);
1074 			asm volatile (".rep 8; nop; .endr");
1075 			local_irq_enable();
1076 			spin_unlock(chip->mutex);
1077 			asm volatile (".rep 8; nop; .endr");
1078 			cond_resched();
1079 
1080 			/*
1081 			 * We're back.  However someone else might have
1082 			 * decided to go write to the chip if we are in
1083 			 * a suspended erase state.  If so let's wait
1084 			 * until it's done.
1085 			 */
1086 			spin_lock(chip->mutex);
1087 			while (chip->state != newstate) {
1088 				DECLARE_WAITQUEUE(wait, current);
1089 				set_current_state(TASK_UNINTERRUPTIBLE);
1090 				add_wait_queue(&chip->wq, &wait);
1091 				spin_unlock(chip->mutex);
1092 				schedule();
1093 				remove_wait_queue(&chip->wq, &wait);
1094 				spin_lock(chip->mutex);
1095 			}
1096 			/* Disallow XIP again */
1097 			local_irq_disable();
1098 
1099 			/* Resume the write or erase operation */
1100 			map_write(map, CMD(0xd0), adr);
1101 			map_write(map, CMD(0x70), adr);
1102 			chip->state = oldstate;
1103 			start = xip_currtime();
1104 		} else if (usec >= 1000000/HZ) {
1105 			/*
1106 			 * Try to save on CPU power when waiting delay
1107 			 * is at least a system timer tick period.
1108 			 * No need to be extremely accurate here.
1109 			 */
1110 			xip_cpu_idle();
1111 		}
1112 		status = map_read(map, adr);
1113 		done = xip_elapsed_since(start);
1114 	} while (!map_word_andequal(map, status, OK, OK)
1115 		 && done < usec);
1116 
1117 	return (done >= usec) ? -ETIME : 0;
1118 }
1119 
1120 /*
1121  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1122  * the flash is actively programming or erasing since we have to poll for
1123  * the operation to complete anyway.  We can't do that in a generic way with
1124  * a XIP setup so do it before the actual flash operation in this case
1125  * and stub it out from INVAL_CACHE_AND_WAIT.
1126  */
1127 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1128 	INVALIDATE_CACHED_RANGE(map, from, size)
1129 
1130 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
1131 	xip_wait_for_operation(map, chip, cmd_adr, usec)
1132 
1133 #else
1134 
1135 #define xip_disable(map, chip, adr)
1136 #define xip_enable(map, chip, adr)
1137 #define XIP_INVAL_CACHED_RANGE(x...)
1138 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1139 
1140 static int inval_cache_and_wait_for_operation(
1141 		struct map_info *map, struct flchip *chip,
1142 		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1143 		unsigned int chip_op_time)
1144 {
1145 	struct cfi_private *cfi = map->fldrv_priv;
1146 	map_word status, status_OK = CMD(0x80);
1147 	int chip_state = chip->state;
1148 	unsigned int timeo, sleep_time;
1149 
1150 	spin_unlock(chip->mutex);
1151 	if (inval_len)
1152 		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1153 	spin_lock(chip->mutex);
1154 
1155 	/* set our timeout to 8 times the expected delay */
1156 	timeo = chip_op_time * 8;
1157 	if (!timeo)
1158 		timeo = 500000;
1159 	sleep_time = chip_op_time / 2;
1160 
1161 	for (;;) {
1162 		status = map_read(map, cmd_adr);
1163 		if (map_word_andequal(map, status, status_OK, status_OK))
1164 			break;
1165 
1166 		if (!timeo) {
1167 			map_write(map, CMD(0x70), cmd_adr);
1168 			chip->state = FL_STATUS;
1169 			return -ETIME;
1170 		}
1171 
1172 		/* OK Still waiting. Drop the lock, wait a while and retry. */
1173 		spin_unlock(chip->mutex);
1174 		if (sleep_time >= 1000000/HZ) {
1175 			/*
1176 			 * Half of the normal delay still remaining
1177 			 * can be performed with a sleeping delay instead
1178 			 * of busy waiting.
1179 			 */
1180 			msleep(sleep_time/1000);
1181 			timeo -= sleep_time;
1182 			sleep_time = 1000000/HZ;
1183 		} else {
1184 			udelay(1);
1185 			cond_resched();
1186 			timeo--;
1187 		}
1188 		spin_lock(chip->mutex);
1189 
1190 		while (chip->state != chip_state) {
1191 			/* Someone's suspended the operation: sleep */
1192 			DECLARE_WAITQUEUE(wait, current);
1193 			set_current_state(TASK_UNINTERRUPTIBLE);
1194 			add_wait_queue(&chip->wq, &wait);
1195 			spin_unlock(chip->mutex);
1196 			schedule();
1197 			remove_wait_queue(&chip->wq, &wait);
1198 			spin_lock(chip->mutex);
1199 		}
1200 	}
1201 
1202 	/* Done and happy. */
	chip->state = FL_STATUS;
1204 	return 0;
1205 }
1206 
1207 #endif
1208 
#define WAIT_TIMEOUT(map, chip, adr, udelay) \
	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay)
1211 
1212 
1213 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1214 {
1215 	unsigned long cmd_addr;
1216 	struct cfi_private *cfi = map->fldrv_priv;
1217 	int ret = 0;
1218 
1219 	adr += chip->start;
1220 
1221 	/* Ensure cmd read/writes are aligned. */
1222 	cmd_addr = adr & ~(map_bankwidth(map)-1);
1223 
1224 	spin_lock(chip->mutex);
1225 
1226 	ret = get_chip(map, chip, cmd_addr, FL_POINT);
1227 
1228 	if (!ret) {
1229 		if (chip->state != FL_POINT && chip->state != FL_READY)
1230 			map_write(map, CMD(0xff), cmd_addr);
1231 
1232 		chip->state = FL_POINT;
1233 		chip->ref_point_counter++;
1234 	}
1235 	spin_unlock(chip->mutex);
1236 
1237 	return ret;
1238 }
1239 
1240 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
1241 {
1242 	struct map_info *map = mtd->priv;
1243 	struct cfi_private *cfi = map->fldrv_priv;
1244 	unsigned long ofs, last_end = 0;
1245 	int chipnum;
1246 	int ret = 0;
1247 
1248 	if (!map->virt || (from + len > mtd->size))
1249 		return -EINVAL;
1250 
1251 	/* Now lock the chip(s) to POINT state */
1252 
1253 	/* ofs: offset within the first chip that the first read should start */
1254 	chipnum = (from >> cfi->chipshift);
1255 	ofs = from - (chipnum << cfi->chipshift);
1256 
1257 	*mtdbuf = (void *)map->virt + cfi->chips[chipnum].start + ofs;
1258 	*retlen = 0;
1259 
1260 	while (len) {
1261 		unsigned long thislen;
1262 
1263 		if (chipnum >= cfi->numchips)
1264 			break;
1265 
1266 		/* We cannot point across chips that are virtually disjoint */
1267 		if (!last_end)
1268 			last_end = cfi->chips[chipnum].start;
1269 		else if (cfi->chips[chipnum].start != last_end)
1270 			break;
1271 
1272 		if ((len + ofs -1) >> cfi->chipshift)
1273 			thislen = (1<<cfi->chipshift) - ofs;
1274 		else
1275 			thislen = len;
1276 
1277 		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1278 		if (ret)
1279 			break;
1280 
1281 		*retlen += thislen;
1282 		len -= thislen;
1283 
1284 		ofs = 0;
1285 		last_end += 1 << cfi->chipshift;
1286 		chipnum++;
1287 	}
1288 	return 0;
1289 }
1290 
1291 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
1292 {
1293 	struct map_info *map = mtd->priv;
1294 	struct cfi_private *cfi = map->fldrv_priv;
1295 	unsigned long ofs;
1296 	int chipnum;
1297 
1298 	/* Now unlock the chip(s) POINT state */
1299 
1300 	/* ofs: offset within the first chip that the first read should start */
1301 	chipnum = (from >> cfi->chipshift);
1302 	ofs = from - (chipnum <<  cfi->chipshift);
1303 
1304 	while (len) {
1305 		unsigned long thislen;
1306 		struct flchip *chip;
1307 
		if (chipnum >= cfi->numchips)
			break;
		chip = &cfi->chips[chipnum];
1311 
1312 		if ((len + ofs -1) >> cfi->chipshift)
1313 			thislen = (1<<cfi->chipshift) - ofs;
1314 		else
1315 			thislen = len;
1316 
1317 		spin_lock(chip->mutex);
1318 		if (chip->state == FL_POINT) {
1319 			chip->ref_point_counter--;
			if (chip->ref_point_counter == 0)
1321 				chip->state = FL_READY;
1322 		} else
			printk(KERN_ERR "%s: Warning: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */
1324 
1325 		put_chip(map, chip, chip->start);
1326 		spin_unlock(chip->mutex);
1327 
1328 		len -= thislen;
1329 		ofs = 0;
1330 		chipnum++;
1331 	}
1332 }
1333 
1334 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1335 {
1336 	unsigned long cmd_addr;
1337 	struct cfi_private *cfi = map->fldrv_priv;
1338 	int ret;
1339 
1340 	adr += chip->start;
1341 
1342 	/* Ensure cmd read/writes are aligned. */
1343 	cmd_addr = adr & ~(map_bankwidth(map)-1);
1344 
1345 	spin_lock(chip->mutex);
1346 	ret = get_chip(map, chip, cmd_addr, FL_READY);
1347 	if (ret) {
1348 		spin_unlock(chip->mutex);
1349 		return ret;
1350 	}
1351 
1352 	if (chip->state != FL_POINT && chip->state != FL_READY) {
1353 		map_write(map, CMD(0xff), cmd_addr);
1354 
1355 		chip->state = FL_READY;
1356 	}
1357 
1358 	map_copy_from(map, buf, adr, len);
1359 
1360 	put_chip(map, chip, cmd_addr);
1361 
1362 	spin_unlock(chip->mutex);
1363 	return 0;
1364 }
1365 
1366 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1367 {
1368 	struct map_info *map = mtd->priv;
1369 	struct cfi_private *cfi = map->fldrv_priv;
1370 	unsigned long ofs;
1371 	int chipnum;
1372 	int ret = 0;
1373 
1374 	/* ofs: offset within the first chip that the first read should start */
1375 	chipnum = (from >> cfi->chipshift);
1376 	ofs = from - (chipnum <<  cfi->chipshift);
1377 
1378 	*retlen = 0;
1379 
1380 	while (len) {
1381 		unsigned long thislen;
1382 
1383 		if (chipnum >= cfi->numchips)
1384 			break;
1385 
1386 		if ((len + ofs -1) >> cfi->chipshift)
1387 			thislen = (1<<cfi->chipshift) - ofs;
1388 		else
1389 			thislen = len;
1390 
1391 		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1392 		if (ret)
1393 			break;
1394 
1395 		*retlen += thislen;
1396 		len -= thislen;
1397 		buf += thislen;
1398 
1399 		ofs = 0;
1400 		chipnum++;
1401 	}
1402 	return ret;
1403 }
1404 
1405 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1406 				     unsigned long adr, map_word datum, int mode)
1407 {
1408 	struct cfi_private *cfi = map->fldrv_priv;
1409 	map_word status, write_cmd;
1410 	int ret=0;
1411 
1412 	adr += chip->start;
1413 
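	/* 0x40 is the standard word program command; parts reporting the
	   0x0200 command set (Sibley-class chips, apparently) want 0x41
	   instead. */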
1414 	switch (mode) {
1415 	case FL_WRITING:
1416 		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1417 		break;
1418 	case FL_OTP_WRITE:
1419 		write_cmd = CMD(0xc0);
1420 		break;
1421 	default:
1422 		return -EINVAL;
1423 	}
1424 
1425 	spin_lock(chip->mutex);
1426 	ret = get_chip(map, chip, adr, mode);
1427 	if (ret) {
1428 		spin_unlock(chip->mutex);
1429 		return ret;
1430 	}
1431 
1432 	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1433 	ENABLE_VPP(map);
1434 	xip_disable(map, chip, adr);
1435 	map_write(map, write_cmd, adr);
1436 	map_write(map, datum, adr);
1437 	chip->state = mode;
1438 
1439 	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1440 				   adr, map_bankwidth(map),
1441 				   chip->word_write_time);
1442 	if (ret) {
1443 		xip_enable(map, chip, adr);
1444 		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1445 		goto out;
1446 	}
1447 
	/* check for errors: SR.4 (program failure), SR.3 (VPP low), SR.1 (block locked) */
1449 	status = map_read(map, adr);
1450 	if (map_word_bitsset(map, status, CMD(0x1a))) {
1451 		unsigned long chipstatus = MERGESTATUS(status);
1452 
1453 		/* reset status */
1454 		map_write(map, CMD(0x50), adr);
1455 		map_write(map, CMD(0x70), adr);
1456 		xip_enable(map, chip, adr);
1457 
1458 		if (chipstatus & 0x02) {
1459 			ret = -EROFS;
1460 		} else if (chipstatus & 0x08) {
1461 			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1462 			ret = -EIO;
1463 		} else {
1464 			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1465 			ret = -EINVAL;
1466 		}
1467 
1468 		goto out;
1469 	}
1470 
1471 	xip_enable(map, chip, adr);
1472  out:	put_chip(map, chip, adr);
1473 	spin_unlock(chip->mutex);
1474 	return ret;
1475 }
1476 
1477 
1478 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1479 {
1480 	struct map_info *map = mtd->priv;
1481 	struct cfi_private *cfi = map->fldrv_priv;
1482 	int ret = 0;
1483 	int chipnum;
1484 	unsigned long ofs;
1485 
1486 	*retlen = 0;
1487 	if (!len)
1488 		return 0;
1489 
1490 	chipnum = to >> cfi->chipshift;
1491 	ofs = to  - (chipnum << cfi->chipshift);
1492 
1493 	/* If it's not bus-aligned, do the first byte write */
1494 	if (ofs & (map_bankwidth(map)-1)) {
1495 		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1496 		int gap = ofs - bus_ofs;
1497 		int n;
1498 		map_word datum;
1499 
1500 		n = min_t(int, len, map_bankwidth(map)-gap);
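		/* Pad the bytes we must not touch with 0xff: programming
		   can only clear bits, so writing 0xff leaves those cells
		   unchanged. */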
1501 		datum = map_word_ff(map);
1502 		datum = map_word_load_partial(map, datum, buf, gap, n);
1503 
1504 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1505 					       bus_ofs, datum, FL_WRITING);
1506 		if (ret)
1507 			return ret;
1508 
1509 		len -= n;
1510 		ofs += n;
1511 		buf += n;
1512 		(*retlen) += n;
1513 
1514 		if (ofs >> cfi->chipshift) {
1515 			chipnum ++;
1516 			ofs = 0;
1517 			if (chipnum == cfi->numchips)
1518 				return 0;
1519 		}
1520 	}
1521 
1522 	while(len >= map_bankwidth(map)) {
1523 		map_word datum = map_word_load(map, buf);
1524 
1525 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1526 				       ofs, datum, FL_WRITING);
1527 		if (ret)
1528 			return ret;
1529 
1530 		ofs += map_bankwidth(map);
1531 		buf += map_bankwidth(map);
1532 		(*retlen) += map_bankwidth(map);
1533 		len -= map_bankwidth(map);
1534 
1535 		if (ofs >> cfi->chipshift) {
1536 			chipnum ++;
1537 			ofs = 0;
1538 			if (chipnum == cfi->numchips)
1539 				return 0;
1540 		}
1541 	}
1542 
1543 	if (len & (map_bankwidth(map)-1)) {
1544 		map_word datum;
1545 
1546 		datum = map_word_ff(map);
1547 		datum = map_word_load_partial(map, datum, buf, 0, len);
1548 
1549 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1550 				       ofs, datum, FL_WRITING);
1551 		if (ret)
1552 			return ret;
1553 
1554 		(*retlen) += len;
1555 	}
1556 
1557 	return 0;
1558 }
1559 
1560 
1561 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1562 				    unsigned long adr, const struct kvec **pvec,
1563 				    unsigned long *pvec_seek, int len)
1564 {
1565 	struct cfi_private *cfi = map->fldrv_priv;
1566 	map_word status, write_cmd, datum;
1567 	unsigned long cmd_adr;
1568 	int ret, wbufsize, word_gap, words;
1569 	const struct kvec *vec;
1570 	unsigned long vec_seek;
1571 	unsigned long initial_adr;
1572 	int initial_len = len;
1573 
1574 	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1575 	adr += chip->start;
1576 	initial_adr = adr;
1577 	cmd_adr = adr & ~(wbufsize-1);
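	/* Command writes are issued at the start of the write buffer
	   containing the target address, so the whole transfer stays
	   within a single buffer's range. */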
1578 
	/* Build the interleave-wide buffered write command word just once */
1580 	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1581 
1582 	spin_lock(chip->mutex);
1583 	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1584 	if (ret) {
1585 		spin_unlock(chip->mutex);
1586 		return ret;
1587 	}
1588 
1589 	XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1590 	ENABLE_VPP(map);
1591 	xip_disable(map, chip, cmd_adr);
1592 
1593 	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1594 	   [...], the device will not accept any more Write to Buffer commands".
1595 	   So we must check here and reset those bits if they're set. Otherwise
1596 	   we're just pissing in the wind */
1597 	if (chip->state != FL_STATUS) {
1598 		map_write(map, CMD(0x70), cmd_adr);
1599 		chip->state = FL_STATUS;
1600 	}
1601 	status = map_read(map, cmd_adr);
1602 	if (map_word_bitsset(map, status, CMD(0x30))) {
1603 		xip_enable(map, chip, cmd_adr);
1604 		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1605 		xip_disable(map, chip, cmd_adr);
1606 		map_write(map, CMD(0x50), cmd_adr);
1607 		map_write(map, CMD(0x70), cmd_adr);
1608 	}
1609 
1610 	chip->state = FL_WRITING_TO_BUFFER;
1611 	map_write(map, write_cmd, cmd_adr);
1612 	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
1613 	if (ret) {
1614 		/* Argh. Not ready for write to buffer */
1615 		map_word Xstatus = map_read(map, cmd_adr);
1616 		map_write(map, CMD(0x70), cmd_adr);
1617 		chip->state = FL_STATUS;
1618 		status = map_read(map, cmd_adr);
1619 		map_write(map, CMD(0x50), cmd_adr);
1620 		map_write(map, CMD(0x70), cmd_adr);
1621 		xip_enable(map, chip, cmd_adr);
1622 		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1623 				map->name, Xstatus.x[0], status.x[0]);
1624 		goto out;
1625 	}
1626 
1627 	/* Figure out the number of words to write */
1628 	word_gap = (-adr & (map_bankwidth(map)-1));
1629 	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
1630 	if (!word_gap) {
1631 		words--;
1632 	} else {
1633 		word_gap = map_bankwidth(map) - word_gap;
1634 		adr -= word_gap;
1635 		datum = map_word_ff(map);
1636 	}
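	/* At this point 'words' holds the transfer length encoded as the
	   chip expects it, i.e. the word count minus one.  A leading gap
	   is handled by padding the first word with 0xff, which leaves
	   the underlying bytes unmodified. */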
1637 
1638 	/* Write length of data to come */
1639 	map_write(map, CMD(words), cmd_adr );
1640 
1641 	/* Write data */
1642 	vec = *pvec;
1643 	vec_seek = *pvec_seek;
1644 	do {
1645 		int n = map_bankwidth(map) - word_gap;
1646 		if (n > vec->iov_len - vec_seek)
1647 			n = vec->iov_len - vec_seek;
1648 		if (n > len)
1649 			n = len;
1650 
1651 		if (!word_gap && len < map_bankwidth(map))
1652 			datum = map_word_ff(map);
1653 
1654 		datum = map_word_load_partial(map, datum,
1655 					      vec->iov_base + vec_seek,
1656 					      word_gap, n);
1657 
1658 		len -= n;
1659 		word_gap += n;
1660 		if (!len || word_gap == map_bankwidth(map)) {
1661 			map_write(map, datum, adr);
1662 			adr += map_bankwidth(map);
1663 			word_gap = 0;
1664 		}
1665 
1666 		vec_seek += n;
1667 		if (vec_seek == vec->iov_len) {
1668 			vec++;
1669 			vec_seek = 0;
1670 		}
1671 	} while (len);
1672 	*pvec = vec;
1673 	*pvec_seek = vec_seek;
1674 
	/* GO GO GO: 0xD0 confirms the buffer and starts programming it
	   into the array */
1676 	map_write(map, CMD(0xd0), cmd_adr);
1677 	chip->state = FL_WRITING;
1678 
1679 	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1680 				   initial_adr, initial_len,
1681 				   chip->buffer_write_time);
1682 	if (ret) {
1683 		map_write(map, CMD(0x70), cmd_adr);
1684 		chip->state = FL_STATUS;
1685 		xip_enable(map, chip, cmd_adr);
1686 		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1687 		goto out;
1688 	}
1689 
	/* check for errors: SR.1 = block locked, SR.3 = VPP low,
	   SR.4 = programming error */
1691 	status = map_read(map, cmd_adr);
1692 	if (map_word_bitsset(map, status, CMD(0x1a))) {
1693 		unsigned long chipstatus = MERGESTATUS(status);
1694 
1695 		/* reset status */
1696 		map_write(map, CMD(0x50), cmd_adr);
1697 		map_write(map, CMD(0x70), cmd_adr);
1698 		xip_enable(map, chip, cmd_adr);
1699 
1700 		if (chipstatus & 0x02) {
1701 			ret = -EROFS;
1702 		} else if (chipstatus & 0x08) {
1703 			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1704 			ret = -EIO;
1705 		} else {
1706 			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1707 			ret = -EINVAL;
1708 		}
1709 
1710 		goto out;
1711 	}
1712 
1713 	xip_enable(map, chip, cmd_adr);
1714  out:	put_chip(map, chip, cmd_adr);
1715 	spin_unlock(chip->mutex);
1716 	return ret;
1717 }
1718 
1719 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1720 				unsigned long count, loff_t to, size_t *retlen)
1721 {
1722 	struct map_info *map = mtd->priv;
1723 	struct cfi_private *cfi = map->fldrv_priv;
1724 	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1725 	int ret = 0;
1726 	int chipnum;
1727 	unsigned long ofs, vec_seek, i;
1728 	size_t len = 0;
1729 
1730 	for (i = 0; i < count; i++)
1731 		len += vecs[i].iov_len;
1732 
1733 	*retlen = 0;
1734 	if (!len)
1735 		return 0;
1736 
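	/* chipshift is log2 of the address range covered by one
	   (interleaved) chip: split 'to' into a chip number and an
	   offset within that chip */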
1737 	chipnum = to >> cfi->chipshift;
1738 	ofs = to - (chipnum << cfi->chipshift);
1739 	vec_seek = 0;
1740 
1741 	do {
1742 		/* We must not cross write block boundaries */
1743 		int size = wbufsize - (ofs & (wbufsize-1));
1744 
1745 		if (size > len)
1746 			size = len;
1747 		ret = do_write_buffer(map, &cfi->chips[chipnum],
1748 				      ofs, &vecs, &vec_seek, size);
1749 		if (ret)
1750 			return ret;
1751 
1752 		ofs += size;
1753 		(*retlen) += size;
1754 		len -= size;
1755 
1756 		if (ofs >> cfi->chipshift) {
			chipnum++;
1758 			ofs = 0;
1759 			if (chipnum == cfi->numchips)
1760 				return 0;
1761 		}
1762 
1763 		/* Be nice and reschedule with the chip in a usable state for other
1764 		   processes. */
1765 		cond_resched();
1766 
1767 	} while (len);
1768 
1769 	return 0;
1770 }
1771 
1772 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1773 				       size_t len, size_t *retlen, const u_char *buf)
1774 {
1775 	struct kvec vec;
1776 
1777 	vec.iov_base = (void *) buf;
1778 	vec.iov_len = len;
1779 
1780 	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1781 }
1782 
1783 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1784 				      unsigned long adr, int len, void *thunk)
1785 {
1786 	struct cfi_private *cfi = map->fldrv_priv;
1787 	map_word status;
1788 	int retries = 3;
1789 	int ret;
1790 
1791 	adr += chip->start;
1792 
1793  retry:
1794 	spin_lock(chip->mutex);
1795 	ret = get_chip(map, chip, adr, FL_ERASING);
1796 	if (ret) {
1797 		spin_unlock(chip->mutex);
1798 		return ret;
1799 	}
1800 
1801 	XIP_INVAL_CACHED_RANGE(map, adr, len);
1802 	ENABLE_VPP(map);
1803 	xip_disable(map, chip, adr);
1804 
1805 	/* Clear the status register first */
1806 	map_write(map, CMD(0x50), adr);
1807 
	/* Now erase: 0x20 = block erase setup, 0xD0 = confirm */
1809 	map_write(map, CMD(0x20), adr);
1810 	map_write(map, CMD(0xD0), adr);
1811 	chip->state = FL_ERASING;
1812 	chip->erase_suspended = 0;
1813 
1814 	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1815 				   adr, len,
1816 				   chip->erase_time);
1817 	if (ret) {
1818 		map_write(map, CMD(0x70), adr);
1819 		chip->state = FL_STATUS;
1820 		xip_enable(map, chip, adr);
1821 		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1822 		goto out;
1823 	}
1824 
1825 	/* We've broken this before. It doesn't hurt to be safe */
1826 	map_write(map, CMD(0x70), adr);
1827 	chip->state = FL_STATUS;
1828 	status = map_read(map, adr);
1829 
	/* check for errors: SR.1 = locked, SR.3 = VPP low,
	   SR.4 and SR.5 together = bad command sequence,
	   SR.5 alone = erase failure */
1831 	if (map_word_bitsset(map, status, CMD(0x3a))) {
1832 		unsigned long chipstatus = MERGESTATUS(status);
1833 
1834 		/* Reset the error bits */
1835 		map_write(map, CMD(0x50), adr);
1836 		map_write(map, CMD(0x70), adr);
1837 		xip_enable(map, chip, adr);
1838 
1839 		if ((chipstatus & 0x30) == 0x30) {
1840 			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1841 			ret = -EINVAL;
1842 		} else if (chipstatus & 0x02) {
1843 			/* Protection bit set */
1844 			ret = -EROFS;
1845 		} else if (chipstatus & 0x8) {
1846 			/* Voltage */
1847 			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1848 			ret = -EIO;
1849 		} else if (chipstatus & 0x20 && retries--) {
1850 			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1851 			put_chip(map, chip, adr);
1852 			spin_unlock(chip->mutex);
1853 			goto retry;
1854 		} else {
1855 			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1856 			ret = -EIO;
1857 		}
1858 
1859 		goto out;
1860 	}
1861 
1862 	xip_enable(map, chip, adr);
1863  out:	put_chip(map, chip, adr);
1864 	spin_unlock(chip->mutex);
1865 	return ret;
1866 }
1867 
1868 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1869 {
1870 	unsigned long ofs, len;
1871 	int ret;
1872 
1873 	ofs = instr->addr;
1874 	len = instr->len;
1875 
1876 	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1877 	if (ret)
1878 		return ret;
1879 
1880 	instr->state = MTD_ERASE_DONE;
1881 	mtd_erase_callback(instr);
1882 
1883 	return 0;
1884 }
1885 
1886 static void cfi_intelext_sync (struct mtd_info *mtd)
1887 {
1888 	struct map_info *map = mtd->priv;
1889 	struct cfi_private *cfi = map->fldrv_priv;
1890 	int i;
1891 	struct flchip *chip;
1892 	int ret = 0;
1893 
1894 	for (i=0; !ret && i<cfi->numchips; i++) {
1895 		chip = &cfi->chips[i];
1896 
1897 		spin_lock(chip->mutex);
1898 		ret = get_chip(map, chip, chip->start, FL_SYNCING);
1899 
1900 		if (!ret) {
1901 			chip->oldstate = chip->state;
1902 			chip->state = FL_SYNCING;
1903 			/* No need to wake_up() on this state change -
1904 			 * as the whole point is that nobody can do anything
1905 			 * with the chip now anyway.
1906 			 */
1907 		}
1908 		spin_unlock(chip->mutex);
1909 	}
1910 
1911 	/* Unlock the chips again */
1912 
	for (i--; i >= 0; i--) {
1914 		chip = &cfi->chips[i];
1915 
1916 		spin_lock(chip->mutex);
1917 
1918 		if (chip->state == FL_SYNCING) {
1919 			chip->state = chip->oldstate;
1920 			chip->oldstate = FL_READY;
1921 			wake_up(&chip->wq);
1922 		}
1923 		spin_unlock(chip->mutex);
1924 	}
1925 }
1926 
1927 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
1928 						struct flchip *chip,
1929 						unsigned long adr,
1930 						int len, void *thunk)
1931 {
1932 	struct cfi_private *cfi = map->fldrv_priv;
1933 	int status, ofs_factor = cfi->interleave * cfi->device_type;
1934 
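	/* 0x90 (Read Identifier) switches the chip to query mode; the
	   block lock status is word 2 of the per-block identifier space,
	   and ofs_factor converts that word address into a map offset */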
1935 	adr += chip->start;
1936 	xip_disable(map, chip, adr+(2*ofs_factor));
1937 	map_write(map, CMD(0x90), adr+(2*ofs_factor));
1938 	chip->state = FL_JEDEC_QUERY;
1939 	status = cfi_read_query(map, adr+(2*ofs_factor));
1940 	xip_enable(map, chip, 0);
1941 	return status;
1942 }
1943 
1944 #ifdef DEBUG_LOCK_BITS
1945 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1946 						struct flchip *chip,
1947 						unsigned long adr,
1948 						int len, void *thunk)
1949 {
1950 	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1951 	       adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
1952 	return 0;
1953 }
1954 #endif
1955 
1956 #define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
1957 #define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)
1958 
1959 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1960 				       unsigned long adr, int len, void *thunk)
1961 {
1962 	struct cfi_private *cfi = map->fldrv_priv;
1963 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1964 	int udelay;
1965 	int ret;
1966 
1967 	adr += chip->start;
1968 
1969 	spin_lock(chip->mutex);
1970 	ret = get_chip(map, chip, adr, FL_LOCKING);
1971 	if (ret) {
1972 		spin_unlock(chip->mutex);
1973 		return ret;
1974 	}
1975 
1976 	ENABLE_VPP(map);
1977 	xip_disable(map, chip, adr);
1978 
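	/* 0x60 is the lock setup command; it is confirmed with 0x01 to
	   set the block's lock bit or with 0xD0 to clear it */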
1979 	map_write(map, CMD(0x60), adr);
1980 	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1981 		map_write(map, CMD(0x01), adr);
1982 		chip->state = FL_LOCKING;
1983 	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1984 		map_write(map, CMD(0xD0), adr);
1985 		chip->state = FL_UNLOCKING;
1986 	} else
1987 		BUG();
1988 
1989 	/*
1990 	 * If Instant Individual Block Locking supported then no need
1991 	 * to delay.
1992 	 */
1993 	udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
1994 
1995 	ret = WAIT_TIMEOUT(map, chip, adr, udelay);
1996 	if (ret) {
1997 		map_write(map, CMD(0x70), adr);
1998 		chip->state = FL_STATUS;
1999 		xip_enable(map, chip, adr);
2000 		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
2001 		goto out;
2002 	}
2003 
2004 	xip_enable(map, chip, adr);
2005 out:	put_chip(map, chip, adr);
2006 	spin_unlock(chip->mutex);
2007 	return ret;
2008 }
2009 
2010 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
2011 {
2012 	int ret;
2013 
2014 #ifdef DEBUG_LOCK_BITS
2015 	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2016 	       __FUNCTION__, ofs, len);
2017 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2018 		ofs, len, NULL);
2019 #endif
2020 
2021 	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2022 		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2023 
2024 #ifdef DEBUG_LOCK_BITS
2025 	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2026 	       __FUNCTION__, ret);
2027 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2028 		ofs, len, NULL);
2029 #endif
2030 
2031 	return ret;
2032 }
2033 
2034 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
2035 {
2036 	int ret;
2037 
2038 #ifdef DEBUG_LOCK_BITS
2039 	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2040 	       __FUNCTION__, ofs, len);
2041 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2042 		ofs, len, NULL);
2043 #endif
2044 
2045 	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2046 					ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2047 
2048 #ifdef DEBUG_LOCK_BITS
2049 	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2050 	       __FUNCTION__, ret);
2051 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2052 		ofs, len, NULL);
2053 #endif
2054 
2055 	return ret;
2056 }
2057 
2058 #ifdef CONFIG_MTD_OTP
2059 
2060 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2061 			u_long data_offset, u_char *buf, u_int size,
2062 			u_long prot_offset, u_int groupno, u_int groupsize);
2063 
2064 static int __xipram
2065 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2066 	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2067 {
2068 	struct cfi_private *cfi = map->fldrv_priv;
2069 	int ret;
2070 
2071 	spin_lock(chip->mutex);
2072 	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2073 	if (ret) {
2074 		spin_unlock(chip->mutex);
2075 		return ret;
2076 	}
2077 
2078 	/* let's ensure we're not reading back cached data from array mode */
2079 	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2080 
2081 	xip_disable(map, chip, chip->start);
2082 	if (chip->state != FL_JEDEC_QUERY) {
2083 		map_write(map, CMD(0x90), chip->start);
2084 		chip->state = FL_JEDEC_QUERY;
2085 	}
2086 	map_copy_from(map, buf, chip->start + offset, size);
2087 	xip_enable(map, chip, chip->start);
2088 
2089 	/* then ensure we don't keep OTP data in the cache */
2090 	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2091 
2092 	put_chip(map, chip, chip->start);
2093 	spin_unlock(chip->mutex);
2094 	return 0;
2095 }
2096 
2097 static int
2098 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2099 	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2100 {
2101 	int ret;
2102 
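	/* OTP registers are programmed through the normal word-program
	   path: assemble each bus word around the caller's bytes, padding
	   with 0xff so the neighbouring bits are left untouched */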
2103 	while (size) {
2104 		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2105 		int gap = offset - bus_ofs;
2106 		int n = min_t(int, size, map_bankwidth(map)-gap);
2107 		map_word datum = map_word_ff(map);
2108 
2109 		datum = map_word_load_partial(map, datum, buf, gap, n);
2110 		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2111 		if (ret)
2112 			return ret;
2113 
2114 		offset += n;
2115 		buf += n;
2116 		size -= n;
2117 	}
2118 
2119 	return 0;
2120 }
2121 
2122 static int
2123 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2124 	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2125 {
2126 	struct cfi_private *cfi = map->fldrv_priv;
2127 	map_word datum;
2128 
2129 	/* make sure area matches group boundaries */
2130 	if (size != grpsz)
2131 		return -EXDEV;
2132 
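	/* Locking is itself a one-time program operation: clear the
	   group's bit in the protection lock register (flash bits can
	   only be programmed from 1 to 0) */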
2133 	datum = map_word_ff(map);
2134 	datum = map_word_clr(map, datum, CMD(1 << grpno));
2135 	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2136 }
2137 
2138 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2139 				 size_t *retlen, u_char *buf,
2140 				 otp_op_t action, int user_regs)
2141 {
2142 	struct map_info *map = mtd->priv;
2143 	struct cfi_private *cfi = map->fldrv_priv;
2144 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2145 	struct flchip *chip;
2146 	struct cfi_intelext_otpinfo *otp;
2147 	u_long devsize, reg_prot_offset, data_offset;
2148 	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2149 	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2150 	int ret;
2151 
2152 	*retlen = 0;
2153 
2154 	/* Check that we actually have some OTP registers */
2155 	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2156 		return -ENODATA;
2157 
	/* we need real chips here, not virtual ones */
2159 	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2160 	chip_step = devsize >> cfi->chipshift;
2161 	chip_num = 0;
2162 
2163 	/* Some chips have OTP located in the _top_ partition only.
2164 	   For example: Intel 28F256L18T (T means top-parameter device) */
2165 	if (cfi->mfr == MANUFACTURER_INTEL) {
2166 		switch (cfi->id) {
2167 		case 0x880b:
2168 		case 0x880c:
2169 		case 0x880d:
2170 			chip_num = chip_step - 1;
2171 		}
2172 	}
2173 
2174 	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2175 		chip = &cfi->chips[chip_num];
2176 		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2177 
2178 		/* first OTP region */
2179 		field = 0;
2180 		reg_prot_offset = extp->ProtRegAddr;
2181 		reg_fact_groups = 1;
2182 		reg_fact_size = 1 << extp->FactProtRegSize;
2183 		reg_user_groups = 1;
2184 		reg_user_size = 1 << extp->UserProtRegSize;
2185 
2186 		while (len > 0) {
			/* flash geometry fixup: the extended query gives
			   word addresses within a single chip; scale them
			   by interleave and device type into map offsets */
2188 			data_offset = reg_prot_offset + 1;
2189 			data_offset *= cfi->interleave * cfi->device_type;
2190 			reg_prot_offset *= cfi->interleave * cfi->device_type;
2191 			reg_fact_size *= cfi->interleave;
2192 			reg_user_size *= cfi->interleave;
2193 
2194 			if (user_regs) {
2195 				groups = reg_user_groups;
2196 				groupsize = reg_user_size;
2197 				/* skip over factory reg area */
2198 				groupno = reg_fact_groups;
2199 				data_offset += reg_fact_groups * reg_fact_size;
2200 			} else {
2201 				groups = reg_fact_groups;
2202 				groupsize = reg_fact_size;
2203 				groupno = 0;
2204 			}
2205 
2206 			while (len > 0 && groups > 0) {
2207 				if (!action) {
2208 					/*
2209 					 * Special case: if action is NULL
2210 					 * we fill buf with otp_info records.
2211 					 */
2212 					struct otp_info *otpinfo;
2213 					map_word lockword;
					/* len is a size_t: check before
					   subtracting to avoid underflow */
					if (len <= sizeof(struct otp_info))
						return -ENOSPC;
					len -= sizeof(struct otp_info);
2217 					ret = do_otp_read(map, chip,
2218 							  reg_prot_offset,
2219 							  (u_char *)&lockword,
2220 							  map_bankwidth(map),
2221 							  0, 0,  0);
2222 					if (ret)
2223 						return ret;
2224 					otpinfo = (struct otp_info *)buf;
2225 					otpinfo->start = from;
2226 					otpinfo->length = groupsize;
2227 					otpinfo->locked =
2228 					   !map_word_bitsset(map, lockword,
2229 							     CMD(1 << groupno));
2230 					from += groupsize;
2231 					buf += sizeof(*otpinfo);
2232 					*retlen += sizeof(*otpinfo);
2233 				} else if (from >= groupsize) {
2234 					from -= groupsize;
2235 					data_offset += groupsize;
2236 				} else {
2237 					int size = groupsize;
2238 					data_offset += from;
2239 					size -= from;
2240 					from = 0;
2241 					if (size > len)
2242 						size = len;
2243 					ret = action(map, chip, data_offset,
2244 						     buf, size, reg_prot_offset,
2245 						     groupno, groupsize);
2246 					if (ret < 0)
2247 						return ret;
2248 					buf += size;
2249 					len -= size;
2250 					*retlen += size;
2251 					data_offset += size;
2252 				}
2253 				groupno++;
2254 				groups--;
2255 			}
2256 
2257 			/* next OTP region */
2258 			if (++field == extp->NumProtectionFields)
2259 				break;
2260 			reg_prot_offset = otp->ProtRegAddr;
2261 			reg_fact_groups = otp->FactGroups;
2262 			reg_fact_size = 1 << otp->FactProtRegSize;
2263 			reg_user_groups = otp->UserGroups;
2264 			reg_user_size = 1 << otp->UserProtRegSize;
2265 			otp++;
2266 		}
2267 	}
2268 
2269 	return 0;
2270 }
2271 
2272 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2273 					   size_t len, size_t *retlen,
2274 					    u_char *buf)
2275 {
2276 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2277 				     buf, do_otp_read, 0);
2278 }
2279 
2280 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2281 					   size_t len, size_t *retlen,
2282 					    u_char *buf)
2283 {
2284 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2285 				     buf, do_otp_read, 1);
2286 }
2287 
2288 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2289 					    size_t len, size_t *retlen,
2290 					     u_char *buf)
2291 {
2292 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2293 				     buf, do_otp_write, 1);
2294 }
2295 
2296 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2297 					   loff_t from, size_t len)
2298 {
2299 	size_t retlen;
2300 	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2301 				     NULL, do_otp_lock, 1);
2302 }
2303 
2304 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2305 					   struct otp_info *buf, size_t len)
2306 {
2307 	size_t retlen;
2308 	int ret;
2309 
2310 	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2311 	return ret ? : retlen;
2312 }
2313 
2314 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2315 					   struct otp_info *buf, size_t len)
2316 {
2317 	size_t retlen;
2318 	int ret;
2319 
2320 	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2321 	return ret ? : retlen;
2322 }
2323 
2324 #endif
2325 
2326 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2327 {
2328 	struct mtd_erase_region_info *region;
2329 	int block, status, i;
2330 	unsigned long adr;
2331 	size_t len;
2332 
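	/* Record the lock bit of every erase block in the region's
	   lockmap so cfi_intelext_restore_locks() can re-unlock the
	   right blocks after a power cycle */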
2333 	for (i = 0; i < mtd->numeraseregions; i++) {
2334 		region = &mtd->eraseregions[i];
2335 		if (!region->lockmap)
2336 			continue;
2337 
2338 		for (block = 0; block < region->numblocks; block++){
2339 			len = region->erasesize;
2340 			adr = region->offset + block * len;
2341 
2342 			status = cfi_varsize_frob(mtd,
2343 					do_getlockstatus_oneblock, adr, len, NULL);
2344 			if (status)
2345 				set_bit(block, region->lockmap);
2346 			else
2347 				clear_bit(block, region->lockmap);
2348 		}
2349 	}
2350 }
2351 
2352 static int cfi_intelext_suspend(struct mtd_info *mtd)
2353 {
2354 	struct map_info *map = mtd->priv;
2355 	struct cfi_private *cfi = map->fldrv_priv;
2356 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2357 	int i;
2358 	struct flchip *chip;
2359 	int ret = 0;
2360 
2361 	if ((mtd->flags & MTD_POWERUP_LOCK)
2362 	    && extp && (extp->FeatureSupport & (1 << 5)))
2363 		cfi_intelext_save_locks(mtd);
2364 
2365 	for (i=0; !ret && i<cfi->numchips; i++) {
2366 		chip = &cfi->chips[i];
2367 
2368 		spin_lock(chip->mutex);
2369 
2370 		switch (chip->state) {
2371 		case FL_READY:
2372 		case FL_STATUS:
2373 		case FL_CFI_QUERY:
2374 		case FL_JEDEC_QUERY:
2375 			if (chip->oldstate == FL_READY) {
2376 				/* place the chip in a known state before suspend */
2377 				map_write(map, CMD(0xFF), cfi->chips[i].start);
2378 				chip->oldstate = chip->state;
2379 				chip->state = FL_PM_SUSPENDED;
2380 				/* No need to wake_up() on this state change -
2381 				 * as the whole point is that nobody can do anything
2382 				 * with the chip now anyway.
2383 				 */
2384 			} else {
2385 				/* There seems to be an operation pending. We must wait for it. */
2386 				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2387 				ret = -EAGAIN;
2388 			}
2389 			break;
2390 		default:
2391 			/* Should we actually wait? Once upon a time these routines weren't
2392 			   allowed to. Or should we return -EAGAIN, because the upper layers
2393 			   ought to have already shut down anything which was using the device
2394 			   anyway? The latter for now. */
2395 			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
2396 			ret = -EAGAIN;
2397 		case FL_PM_SUSPENDED:
2398 			break;
2399 		}
2400 		spin_unlock(chip->mutex);
2401 	}
2402 
2403 	/* Unlock the chips again */
2404 
2405 	if (ret) {
		for (i--; i >= 0; i--) {
2407 			chip = &cfi->chips[i];
2408 
2409 			spin_lock(chip->mutex);
2410 
2411 			if (chip->state == FL_PM_SUSPENDED) {
2412 				/* No need to force it into a known state here,
2413 				   because we're returning failure, and it didn't
2414 				   get power cycled */
2415 				chip->state = chip->oldstate;
2416 				chip->oldstate = FL_READY;
2417 				wake_up(&chip->wq);
2418 			}
2419 			spin_unlock(chip->mutex);
2420 		}
2421 	}
2422 
2423 	return ret;
2424 }
2425 
2426 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2427 {
2428 	struct mtd_erase_region_info *region;
2429 	int block, i;
2430 	unsigned long adr;
2431 	size_t len;
2432 
2433 	for (i = 0; i < mtd->numeraseregions; i++) {
2434 		region = &mtd->eraseregions[i];
2435 		if (!region->lockmap)
2436 			continue;
2437 
2438 		for (block = 0; block < region->numblocks; block++) {
2439 			len = region->erasesize;
2440 			adr = region->offset + block * len;
2441 
2442 			if (!test_bit(block, region->lockmap))
2443 				cfi_intelext_unlock(mtd, adr, len);
2444 		}
2445 	}
2446 }
2447 
2448 static void cfi_intelext_resume(struct mtd_info *mtd)
2449 {
2450 	struct map_info *map = mtd->priv;
2451 	struct cfi_private *cfi = map->fldrv_priv;
2452 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2453 	int i;
2454 	struct flchip *chip;
2455 
2456 	for (i=0; i<cfi->numchips; i++) {
2457 
2458 		chip = &cfi->chips[i];
2459 
2460 		spin_lock(chip->mutex);
2461 
2462 		/* Go to known state. Chip may have been power cycled */
2463 		if (chip->state == FL_PM_SUSPENDED) {
2464 			map_write(map, CMD(0xFF), cfi->chips[i].start);
2465 			chip->oldstate = chip->state = FL_READY;
2466 			wake_up(&chip->wq);
2467 		}
2468 
2469 		spin_unlock(chip->mutex);
2470 	}
2471 
2472 	if ((mtd->flags & MTD_POWERUP_LOCK)
2473 	    && extp && (extp->FeatureSupport & (1 << 5)))
2474 		cfi_intelext_restore_locks(mtd);
2475 }
2476 
2477 static int cfi_intelext_reset(struct mtd_info *mtd)
2478 {
2479 	struct map_info *map = mtd->priv;
2480 	struct cfi_private *cfi = map->fldrv_priv;
2481 	int i, ret;
2482 
2483 	for (i=0; i < cfi->numchips; i++) {
2484 		struct flchip *chip = &cfi->chips[i];
2485 
2486 		/* force the completion of any ongoing operation
2487 		   and switch to array mode so any bootloader in
2488 		   flash is accessible for soft reboot. */
2489 		spin_lock(chip->mutex);
2490 		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2491 		if (!ret) {
2492 			map_write(map, CMD(0xff), chip->start);
2493 			chip->state = FL_SHUTDOWN;
2494 		}
2495 		spin_unlock(chip->mutex);
2496 	}
2497 
2498 	return 0;
2499 }
2500 
2501 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2502 			       void *v)
2503 {
2504 	struct mtd_info *mtd;
2505 
2506 	mtd = container_of(nb, struct mtd_info, reboot_notifier);
2507 	cfi_intelext_reset(mtd);
2508 	return NOTIFY_DONE;
2509 }
2510 
2511 static void cfi_intelext_destroy(struct mtd_info *mtd)
2512 {
2513 	struct map_info *map = mtd->priv;
2514 	struct cfi_private *cfi = map->fldrv_priv;
2515 	struct mtd_erase_region_info *region;
	int i;

	cfi_intelext_reset(mtd);
2518 	unregister_reboot_notifier(&mtd->reboot_notifier);
2519 	kfree(cfi->cmdset_priv);
2520 	kfree(cfi->cfiq);
2521 	kfree(cfi->chips[0].priv);
2522 	kfree(cfi);
	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		kfree(region->lockmap);
	}
2528 	kfree(mtd->eraseregions);
2529 }
2530 
2531 MODULE_LICENSE("GPL");
2532 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2533 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2534 MODULE_ALIAS("cfi_cmdset_0003");
2535 MODULE_ALIAS("cfi_cmdset_0200");
2536