1 /*
2  * Common Flash Interface support:
3  *   Intel Extended Vendor Command Set (ID 0x0001)
4  *
5  * (C) 2000 Red Hat. GPL'd
6  *
7  *
8  * 10/10/2000	Nicolas Pitre <nico@fluxnic.net>
9  * 	- completely revamped method functions so they are aware and
10  * 	  independent of the flash geometry (buswidth, interleave, etc.)
11  * 	- scalability vs code size is completely set at compile-time
12  * 	  (see include/linux/mtd/cfi.h for selection)
13  *	- optimized write buffer method
14  * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
15  *	- reworked lock/unlock/erase support for var size flash
16  * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
17  * 	- auto unlock sectors on resume for auto locking flash on power up
18  */
19 
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
24 #include <linux/init.h>
25 #include <asm/io.h>
26 #include <asm/byteorder.h>
27 
28 #include <linux/errno.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/interrupt.h>
32 #include <linux/reboot.h>
33 #include <linux/bitmap.h>
34 #include <linux/mtd/xip.h>
35 #include <linux/mtd/map.h>
36 #include <linux/mtd/mtd.h>
37 #include <linux/mtd/compatmac.h>
38 #include <linux/mtd/cfi.h>
39 
40 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
41 /* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
42 
43 // debugging, turns off buffer write mode if set to 1
44 #define FORCE_WORD_WRITE 0
45 
46 /* Intel chips */
47 #define I82802AB	0x00ad
48 #define I82802AC	0x00ac
49 #define PF38F4476	0x881c
50 /* STMicroelectronics chips */
51 #define M50LPW080       0x002F
52 #define M50FLW080A	0x0080
53 #define M50FLW080B	0x0081
54 /* Atmel chips */
55 #define AT49BV640D	0x02de
56 #define AT49BV640DT	0x02db
57 
58 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
59 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
60 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
61 static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
62 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
63 static void cfi_intelext_sync (struct mtd_info *);
64 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
65 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
66 #ifdef CONFIG_MTD_OTP
67 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
68 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
69 static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
70 static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
71 static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
72 					    struct otp_info *, size_t);
73 static int cfi_intelext_get_user_prot_info (struct mtd_info *,
74 					    struct otp_info *, size_t);
75 #endif
76 static int cfi_intelext_suspend (struct mtd_info *);
77 static void cfi_intelext_resume (struct mtd_info *);
78 static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
79 
80 static void cfi_intelext_destroy(struct mtd_info *);
81 
82 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
83 
84 static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
85 static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
86 
87 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
88 		     size_t *retlen, void **virt, resource_size_t *phys);
89 static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
90 
91 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
92 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
93 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
94 #include "fwh_lock.h"
95 
96 
97 
98 /*
99  *  *********** SETUP AND PROBE BITS  ***********
100  */
101 
102 static struct mtd_chip_driver cfi_intelext_chipdrv = {
103 	.probe		= NULL, /* Not usable directly */
104 	.destroy	= cfi_intelext_destroy,
105 	.name		= "cfi_cmdset_0001",
106 	.module		= THIS_MODULE
107 };
108 
109 /* #define DEBUG_LOCK_BITS */
110 /* #define DEBUG_CFI_FEATURES */
111 
112 #ifdef DEBUG_CFI_FEATURES
113 static void cfi_tell_features(struct cfi_pri_intelext *extp)
114 {
115 	int i;
116 	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
117 	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
118 	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
119 	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
120 	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
121 	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
122 	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
123 	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
124 	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
125 	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
126 	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
127 	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
128 	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
129 	for (i=11; i<32; i++) {
130 		if (extp->FeatureSupport & (1<<i))
131 			printk("     - Unknown Bit %X:      supported\n", i);
132 	}
133 
134 	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
135 	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
136 	for (i=1; i<8; i++) {
137 		if (extp->SuspendCmdSupport & (1<<i))
138 			printk("     - Unknown Bit %X:               supported\n", i);
139 	}
140 
141 	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
142 	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
143 	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
144 	for (i=2; i<3; i++) {
145 		if (extp->BlkStatusRegMask & (1<<i))
146 			printk("     - Unknown Bit %X Active: yes\n",i);
147 	}
148 	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
149 	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
150 	for (i=6; i<16; i++) {
151 		if (extp->BlkStatusRegMask & (1<<i))
152 			printk("     - Unknown Bit %X Active: yes\n",i);
153 	}
154 
155 	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
156 	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
157 	if (extp->VppOptimal)
158 		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
159 		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
160 }
161 #endif
162 
163 /* Atmel chips don't use the same PRI format as Intel chips */
164 static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
165 {
166 	struct map_info *map = mtd->priv;
167 	struct cfi_private *cfi = map->fldrv_priv;
168 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
169 	struct cfi_pri_atmel atmel_pri;
170 	uint32_t features = 0;
171 
172 	/* Reverse byteswapping */
173 	extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
174 	extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
175 	extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
176 
177 	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
178 	memset((char *)extp + 5, 0, sizeof(*extp) - 5);
179 
180 	printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);
181 
182 	if (atmel_pri.Features & 0x01) /* chip erase supported */
183 		features |= (1<<0);
184 	if (atmel_pri.Features & 0x02) /* erase suspend supported */
185 		features |= (1<<1);
186 	if (atmel_pri.Features & 0x04) /* program suspend supported */
187 		features |= (1<<2);
188 	if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
189 		features |= (1<<9);
190 	if (atmel_pri.Features & 0x20) /* page mode read supported */
191 		features |= (1<<7);
192 	if (atmel_pri.Features & 0x40) /* queued erase supported */
193 		features |= (1<<4);
194 	if (atmel_pri.Features & 0x80) /* Protection bits supported */
195 		features |= (1<<6);
196 
197 	extp->FeatureSupport = features;
198 
199 	/* burst write mode not supported */
200 	cfi->cfiq->BufWriteTimeoutTyp = 0;
201 	cfi->cfiq->BufWriteTimeoutMax = 0;
202 }
203 
204 static void fixup_at49bv640dx_lock(struct mtd_info *mtd, void *param)
205 {
206 	struct map_info *map = mtd->priv;
207 	struct cfi_private *cfi = map->fldrv_priv;
208 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
209 
210 	cfip->FeatureSupport |= (1 << 5);
211 	mtd->flags |= MTD_POWERUP_LOCK;
212 }
213 
214 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
215 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
216 static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
217 {
218 	struct map_info *map = mtd->priv;
219 	struct cfi_private *cfi = map->fldrv_priv;
220 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
221 
222 	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
223 	                    "erase on write disabled.\n");
224 	extp->SuspendCmdSupport &= ~1;
225 }
226 #endif
227 
228 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
229 static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
230 {
231 	struct map_info *map = mtd->priv;
232 	struct cfi_private *cfi = map->fldrv_priv;
233 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
234 
235 	if (cfip && (cfip->FeatureSupport&4)) {
236 		cfip->FeatureSupport &= ~4;
237 		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
238 	}
239 }
240 #endif
241 
242 static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
243 {
244 	struct map_info *map = mtd->priv;
245 	struct cfi_private *cfi = map->fldrv_priv;
246 
247 	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
248 	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
249 }
250 
251 static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
252 {
253 	struct map_info *map = mtd->priv;
254 	struct cfi_private *cfi = map->fldrv_priv;
255 
256 	/* Note this is done after the region info is endian swapped */
257 	cfi->cfiq->EraseRegionInfo[1] =
258 		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
259 };
260 
261 static void fixup_use_point(struct mtd_info *mtd, void *param)
262 {
263 	struct map_info *map = mtd->priv;
264 	if (!mtd->point && map_is_linear(map)) {
265 		mtd->point   = cfi_intelext_point;
266 		mtd->unpoint = cfi_intelext_unpoint;
267 	}
268 }
269 
270 static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
271 {
272 	struct map_info *map = mtd->priv;
273 	struct cfi_private *cfi = map->fldrv_priv;
274 	if (cfi->cfiq->BufWriteTimeoutTyp) {
275 		printk(KERN_INFO "Using buffer write method\n" );
276 		mtd->write = cfi_intelext_write_buffers;
277 		mtd->writev = cfi_intelext_writev;
278 	}
279 }
280 
281 /*
282  * Some chips power-up with all sectors locked by default.
283  */
284 static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
285 {
286 	struct map_info *map = mtd->priv;
287 	struct cfi_private *cfi = map->fldrv_priv;
288 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
289 
290 	if (cfip->FeatureSupport&32) {
291 		printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
292 		mtd->flags |= MTD_POWERUP_LOCK;
293 	}
294 }
295 
296 static struct cfi_fixup cfi_fixup_table[] = {
297 	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
298 	{ CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock, NULL },
299 	{ CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock, NULL },
300 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
301 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
302 #endif
303 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
304 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
305 #endif
306 #if !FORCE_WORD_WRITE
307 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
308 #endif
309 	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
310 	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
311 	{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
312 	{ 0, 0, NULL, NULL }
313 };
314 
315 static struct cfi_fixup jedec_fixup_table[] = {
316 	{ CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
317 	{ CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
318 	{ CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
319 	{ CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock, NULL, },
320 	{ CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock, NULL, },
321 	{ 0, 0, NULL, NULL }
322 };
323 static struct cfi_fixup fixup_table[] = {
324 	/* The CFI vendor ids and the JEDEC vendor IDs appear
325 	 * to be common.  It is like the devices id's are as
326 	 * well.  This table is to pick all cases where
327 	 * we know that is the case.
328 	 */
329 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
330 	{ 0, 0, NULL, NULL }
331 };
332 
333 static void cfi_fixup_major_minor(struct cfi_private *cfi,
334 						struct cfi_pri_intelext *extp)
335 {
336 	if (cfi->mfr == CFI_MFR_INTEL &&
337 			cfi->id == PF38F4476 && extp->MinorVersion == '3')
338 		extp->MinorVersion = '1';
339 }
340 
341 static inline struct cfi_pri_intelext *
342 read_pri_intelext(struct map_info *map, __u16 adr)
343 {
344 	struct cfi_private *cfi = map->fldrv_priv;
345 	struct cfi_pri_intelext *extp;
346 	unsigned int extra_size = 0;
347 	unsigned int extp_size = sizeof(*extp);
348 
349  again:
350 	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
351 	if (!extp)
352 		return NULL;
353 
354 	cfi_fixup_major_minor(cfi, extp);
355 
356 	if (extp->MajorVersion != '1' ||
357 	    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
358 		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
359 		       "version %c.%c.\n",  extp->MajorVersion,
360 		       extp->MinorVersion);
361 		kfree(extp);
362 		return NULL;
363 	}
364 
365 	/* Do some byteswapping if necessary */
366 	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
367 	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
368 	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
369 
370 	if (extp->MinorVersion >= '0') {
371 		extra_size = 0;
372 
373 		/* Protection Register info */
374 		extra_size += (extp->NumProtectionFields - 1) *
375 			      sizeof(struct cfi_intelext_otpinfo);
376 	}
377 
378 	if (extp->MinorVersion >= '1') {
379 		/* Burst Read info */
380 		extra_size += 2;
381 		if (extp_size < sizeof(*extp) + extra_size)
382 			goto need_more;
383 		extra_size += extp->extra[extra_size - 1];
384 	}
385 
386 	if (extp->MinorVersion >= '3') {
387 		int nb_parts, i;
388 
389 		/* Number of hardware-partitions */
390 		extra_size += 1;
391 		if (extp_size < sizeof(*extp) + extra_size)
392 			goto need_more;
393 		nb_parts = extp->extra[extra_size - 1];
394 
395 		/* skip the sizeof(partregion) field in CFI 1.4 */
396 		if (extp->MinorVersion >= '4')
397 			extra_size += 2;
398 
399 		for (i = 0; i < nb_parts; i++) {
400 			struct cfi_intelext_regioninfo *rinfo;
401 			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
402 			extra_size += sizeof(*rinfo);
403 			if (extp_size < sizeof(*extp) + extra_size)
404 				goto need_more;
405 			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
406 			extra_size += (rinfo->NumBlockTypes - 1)
407 				      * sizeof(struct cfi_intelext_blockinfo);
408 		}
409 
410 		if (extp->MinorVersion >= '4')
411 			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);
412 
413 		if (extp_size < sizeof(*extp) + extra_size) {
414 			need_more:
415 			extp_size = sizeof(*extp) + extra_size;
416 			kfree(extp);
417 			if (extp_size > 4096) {
418 				printk(KERN_ERR
419 					"%s: cfi_pri_intelext is too fat\n",
420 					__func__);
421 				return NULL;
422 			}
423 			goto again;
424 		}
425 	}
426 
427 	return extp;
428 }
429 
430 struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
431 {
432 	struct cfi_private *cfi = map->fldrv_priv;
433 	struct mtd_info *mtd;
434 	int i;
435 
436 	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
437 	if (!mtd) {
438 		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
439 		return NULL;
440 	}
441 	mtd->priv = map;
442 	mtd->type = MTD_NORFLASH;
443 
444 	/* Fill in the default mtd operations */
445 	mtd->erase   = cfi_intelext_erase_varsize;
446 	mtd->read    = cfi_intelext_read;
447 	mtd->write   = cfi_intelext_write_words;
448 	mtd->sync    = cfi_intelext_sync;
449 	mtd->lock    = cfi_intelext_lock;
450 	mtd->unlock  = cfi_intelext_unlock;
451 	mtd->suspend = cfi_intelext_suspend;
452 	mtd->resume  = cfi_intelext_resume;
453 	mtd->flags   = MTD_CAP_NORFLASH;
454 	mtd->name    = map->name;
455 	mtd->writesize = 1;
456 
457 	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
458 
459 	if (cfi->cfi_mode == CFI_MODE_CFI) {
460 		/*
461 		 * It's a real CFI chip, not one for which the probe
462 		 * routine faked a CFI structure. So we read the feature
463 		 * table from it.
464 		 */
465 		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
466 		struct cfi_pri_intelext *extp;
467 
468 		extp = read_pri_intelext(map, adr);
469 		if (!extp) {
470 			kfree(mtd);
471 			return NULL;
472 		}
473 
474 		/* Install our own private info structure */
475 		cfi->cmdset_priv = extp;
476 
477 		cfi_fixup(mtd, cfi_fixup_table);
478 
479 #ifdef DEBUG_CFI_FEATURES
480 		/* Tell the user about it in lots of lovely detail */
481 		cfi_tell_features(extp);
482 #endif
483 
484 		if(extp->SuspendCmdSupport & 1) {
485 			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
486 		}
487 	}
488 	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
489 		/* Apply jedec specific fixups */
490 		cfi_fixup(mtd, jedec_fixup_table);
491 	}
492 	/* Apply generic fixups */
493 	cfi_fixup(mtd, fixup_table);
494 
495 	for (i=0; i< cfi->numchips; i++) {
496 		if (cfi->cfiq->WordWriteTimeoutTyp)
497 			cfi->chips[i].word_write_time =
498 				1<<cfi->cfiq->WordWriteTimeoutTyp;
499 		else
500 			cfi->chips[i].word_write_time = 50000;
501 
502 		if (cfi->cfiq->BufWriteTimeoutTyp)
503 			cfi->chips[i].buffer_write_time =
504 				1<<cfi->cfiq->BufWriteTimeoutTyp;
505 		/* No default; if it isn't specified, we won't use it */
506 
507 		if (cfi->cfiq->BlockEraseTimeoutTyp)
508 			cfi->chips[i].erase_time =
509 				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
510 		else
511 			cfi->chips[i].erase_time = 2000000;
512 
513 		if (cfi->cfiq->WordWriteTimeoutTyp &&
514 		    cfi->cfiq->WordWriteTimeoutMax)
515 			cfi->chips[i].word_write_time_max =
516 				1<<(cfi->cfiq->WordWriteTimeoutTyp +
517 				    cfi->cfiq->WordWriteTimeoutMax);
518 		else
519 			cfi->chips[i].word_write_time_max = 50000 * 8;
520 
521 		if (cfi->cfiq->BufWriteTimeoutTyp &&
522 		    cfi->cfiq->BufWriteTimeoutMax)
523 			cfi->chips[i].buffer_write_time_max =
524 				1<<(cfi->cfiq->BufWriteTimeoutTyp +
525 				    cfi->cfiq->BufWriteTimeoutMax);
526 
527 		if (cfi->cfiq->BlockEraseTimeoutTyp &&
528 		    cfi->cfiq->BlockEraseTimeoutMax)
529 			cfi->chips[i].erase_time_max =
530 				1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
531 				       cfi->cfiq->BlockEraseTimeoutMax);
532 		else
533 			cfi->chips[i].erase_time_max = 2000000 * 8;
534 
535 		cfi->chips[i].ref_point_counter = 0;
536 		init_waitqueue_head(&(cfi->chips[i].wq));
537 	}
538 
539 	map->fldrv = &cfi_intelext_chipdrv;
540 
541 	return cfi_intelext_setup(mtd);
542 }
543 struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
544 struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
545 EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
546 EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
547 EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
548 
549 static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
550 {
551 	struct map_info *map = mtd->priv;
552 	struct cfi_private *cfi = map->fldrv_priv;
553 	unsigned long offset = 0;
554 	int i,j;
555 	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
556 
557 	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
558 
559 	mtd->size = devsize * cfi->numchips;
560 
561 	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
562 	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
563 			* mtd->numeraseregions, GFP_KERNEL);
564 	if (!mtd->eraseregions) {
565 		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
566 		goto setup_err;
567 	}
568 
569 	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
570 		unsigned long ernum, ersize;
571 		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
572 		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
573 
574 		if (mtd->erasesize < ersize) {
575 			mtd->erasesize = ersize;
576 		}
577 		for (j=0; j<cfi->numchips; j++) {
578 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
579 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
580 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
581 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
582 		}
583 		offset += (ersize * ernum);
584 	}
585 
586 	if (offset != devsize) {
587 		/* Argh */
588 		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
589 		goto setup_err;
590 	}
591 
592 	for (i=0; i<mtd->numeraseregions;i++){
593 		printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
594 		       i,(unsigned long long)mtd->eraseregions[i].offset,
595 		       mtd->eraseregions[i].erasesize,
596 		       mtd->eraseregions[i].numblocks);
597 	}
598 
599 #ifdef CONFIG_MTD_OTP
600 	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
601 	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
602 	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
603 	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
604 	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
605 	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
606 #endif
607 
608 	/* This function has the potential to distort the reality
609 	   a bit and therefore should be called last. */
610 	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
611 		goto setup_err;
612 
613 	__module_get(THIS_MODULE);
614 	register_reboot_notifier(&mtd->reboot_notifier);
615 	return mtd;
616 
617  setup_err:
618 	if(mtd) {
619 		kfree(mtd->eraseregions);
620 		kfree(mtd);
621 	}
622 	kfree(cfi->cmdset_priv);
623 	return NULL;
624 }
625 
626 static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
627 					struct cfi_private **pcfi)
628 {
629 	struct map_info *map = mtd->priv;
630 	struct cfi_private *cfi = *pcfi;
631 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
632 
633 	/*
634 	 * Probing of multi-partition flash chips.
635 	 *
636 	 * To support multiple partitions when available, we simply arrange
637 	 * for each of them to have their own flchip structure even if they
638 	 * are on the same physical chip.  This means completely recreating
639 	 * a new cfi_private structure right here which is a blatent code
640 	 * layering violation, but this is still the least intrusive
641 	 * arrangement at this point. This can be rearranged in the future
642 	 * if someone feels motivated enough.  --nico
643 	 */
644 	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
645 	    && extp->FeatureSupport & (1 << 9)) {
646 		struct cfi_private *newcfi;
647 		struct flchip *chip;
648 		struct flchip_shared *shared;
649 		int offs, numregions, numparts, partshift, numvirtchips, i, j;
650 
651 		/* Protection Register info */
652 		offs = (extp->NumProtectionFields - 1) *
653 		       sizeof(struct cfi_intelext_otpinfo);
654 
655 		/* Burst Read info */
656 		offs += extp->extra[offs+1]+2;
657 
658 		/* Number of partition regions */
659 		numregions = extp->extra[offs];
660 		offs += 1;
661 
662 		/* skip the sizeof(partregion) field in CFI 1.4 */
663 		if (extp->MinorVersion >= '4')
664 			offs += 2;
665 
666 		/* Number of hardware partitions */
667 		numparts = 0;
668 		for (i = 0; i < numregions; i++) {
669 			struct cfi_intelext_regioninfo *rinfo;
670 			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
671 			numparts += rinfo->NumIdentPartitions;
672 			offs += sizeof(*rinfo)
673 				+ (rinfo->NumBlockTypes - 1) *
674 				  sizeof(struct cfi_intelext_blockinfo);
675 		}
676 
677 		if (!numparts)
678 			numparts = 1;
679 
680 		/* Programming Region info */
681 		if (extp->MinorVersion >= '4') {
682 			struct cfi_intelext_programming_regioninfo *prinfo;
683 			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
684 			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
685 			mtd->flags &= ~MTD_BIT_WRITEABLE;
686 			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
687 			       map->name, mtd->writesize,
688 			       cfi->interleave * prinfo->ControlValid,
689 			       cfi->interleave * prinfo->ControlInvalid);
690 		}
691 
692 		/*
693 		 * All functions below currently rely on all chips having
694 		 * the same geometry so we'll just assume that all hardware
695 		 * partitions are of the same size too.
696 		 */
697 		partshift = cfi->chipshift - __ffs(numparts);
698 
699 		if ((1 << partshift) < mtd->erasesize) {
700 			printk( KERN_ERR
701 				"%s: bad number of hw partitions (%d)\n",
702 				__func__, numparts);
703 			return -EINVAL;
704 		}
705 
706 		numvirtchips = cfi->numchips * numparts;
707 		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
708 		if (!newcfi)
709 			return -ENOMEM;
710 		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
711 		if (!shared) {
712 			kfree(newcfi);
713 			return -ENOMEM;
714 		}
715 		memcpy(newcfi, cfi, sizeof(struct cfi_private));
716 		newcfi->numchips = numvirtchips;
717 		newcfi->chipshift = partshift;
718 
719 		chip = &newcfi->chips[0];
720 		for (i = 0; i < cfi->numchips; i++) {
721 			shared[i].writing = shared[i].erasing = NULL;
722 			spin_lock_init(&shared[i].lock);
723 			for (j = 0; j < numparts; j++) {
724 				*chip = cfi->chips[i];
725 				chip->start += j << partshift;
726 				chip->priv = &shared[i];
727 				/* those should be reset too since
728 				   they create memory references. */
729 				init_waitqueue_head(&chip->wq);
730 				spin_lock_init(&chip->_spinlock);
731 				chip->mutex = &chip->_spinlock;
732 				chip++;
733 			}
734 		}
735 
736 		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
737 				  "--> %d partitions of %d KiB\n",
738 				  map->name, cfi->numchips, cfi->interleave,
739 				  newcfi->numchips, 1<<(newcfi->chipshift-10));
740 
741 		map->fldrv_priv = newcfi;
742 		*pcfi = newcfi;
743 		kfree(cfi);
744 	}
745 
746 	return 0;
747 }
748 
749 /*
750  *  *********** CHIP ACCESS FUNCTIONS ***********
751  */
752 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
753 {
754 	DECLARE_WAITQUEUE(wait, current);
755 	struct cfi_private *cfi = map->fldrv_priv;
756 	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
757 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
758 	unsigned long timeo = jiffies + HZ;
759 
760 	/* Prevent setting state FL_SYNCING for chip in suspended state. */
761 	if (mode == FL_SYNCING && chip->oldstate != FL_READY)
762 		goto sleep;
763 
764 	switch (chip->state) {
765 
766 	case FL_STATUS:
767 		for (;;) {
768 			status = map_read(map, adr);
769 			if (map_word_andequal(map, status, status_OK, status_OK))
770 				break;
771 
772 			/* At this point we're fine with write operations
773 			   in other partitions as they don't conflict. */
774 			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
775 				break;
776 
777 			spin_unlock(chip->mutex);
778 			cfi_udelay(1);
779 			spin_lock(chip->mutex);
780 			/* Someone else might have been playing with it. */
781 			return -EAGAIN;
782 		}
783 		/* Fall through */
784 	case FL_READY:
785 	case FL_CFI_QUERY:
786 	case FL_JEDEC_QUERY:
787 		return 0;
788 
789 	case FL_ERASING:
790 		if (!cfip ||
791 		    !(cfip->FeatureSupport & 2) ||
792 		    !(mode == FL_READY || mode == FL_POINT ||
793 		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
794 			goto sleep;
795 
796 
797 		/* Erase suspend */
798 		map_write(map, CMD(0xB0), adr);
799 
800 		/* If the flash has finished erasing, then 'erase suspend'
801 		 * appears to make some (28F320) flash devices switch to
802 		 * 'read' mode.  Make sure that we switch to 'read status'
803 		 * mode so we get the right data. --rmk
804 		 */
805 		map_write(map, CMD(0x70), adr);
806 		chip->oldstate = FL_ERASING;
807 		chip->state = FL_ERASE_SUSPENDING;
808 		chip->erase_suspended = 1;
809 		for (;;) {
810 			status = map_read(map, adr);
811 			if (map_word_andequal(map, status, status_OK, status_OK))
812 			        break;
813 
814 			if (time_after(jiffies, timeo)) {
815 				/* Urgh. Resume and pretend we weren't here.  */
816 				map_write(map, CMD(0xd0), adr);
817 				/* Make sure we're in 'read status' mode if it had finished */
818 				map_write(map, CMD(0x70), adr);
819 				chip->state = FL_ERASING;
820 				chip->oldstate = FL_READY;
821 				printk(KERN_ERR "%s: Chip not ready after erase "
822 				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
823 				return -EIO;
824 			}
825 
826 			spin_unlock(chip->mutex);
827 			cfi_udelay(1);
828 			spin_lock(chip->mutex);
829 			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
830 			   So we can just loop here. */
831 		}
832 		chip->state = FL_STATUS;
833 		return 0;
834 
835 	case FL_XIP_WHILE_ERASING:
836 		if (mode != FL_READY && mode != FL_POINT &&
837 		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
838 			goto sleep;
839 		chip->oldstate = chip->state;
840 		chip->state = FL_READY;
841 		return 0;
842 
843 	case FL_SHUTDOWN:
844 		/* The machine is rebooting now,so no one can get chip anymore */
845 		return -EIO;
846 	case FL_POINT:
847 		/* Only if there's no operation suspended... */
848 		if (mode == FL_READY && chip->oldstate == FL_READY)
849 			return 0;
850 		/* Fall through */
851 	default:
852 	sleep:
853 		set_current_state(TASK_UNINTERRUPTIBLE);
854 		add_wait_queue(&chip->wq, &wait);
855 		spin_unlock(chip->mutex);
856 		schedule();
857 		remove_wait_queue(&chip->wq, &wait);
858 		spin_lock(chip->mutex);
859 		return -EAGAIN;
860 	}
861 }
862 
863 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
864 {
865 	int ret;
866 	DECLARE_WAITQUEUE(wait, current);
867 
868  retry:
869 	if (chip->priv &&
870 	    (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
871 	    || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
872 		/*
873 		 * OK. We have possibility for contention on the write/erase
874 		 * operations which are global to the real chip and not per
875 		 * partition.  So let's fight it over in the partition which
876 		 * currently has authority on the operation.
877 		 *
878 		 * The rules are as follows:
879 		 *
880 		 * - any write operation must own shared->writing.
881 		 *
882 		 * - any erase operation must own _both_ shared->writing and
883 		 *   shared->erasing.
884 		 *
885 		 * - contention arbitration is handled in the owner's context.
886 		 *
887 		 * The 'shared' struct can be read and/or written only when
888 		 * its lock is taken.
889 		 */
890 		struct flchip_shared *shared = chip->priv;
891 		struct flchip *contender;
892 		spin_lock(&shared->lock);
893 		contender = shared->writing;
894 		if (contender && contender != chip) {
895 			/*
896 			 * The engine to perform desired operation on this
897 			 * partition is already in use by someone else.
898 			 * Let's fight over it in the context of the chip
899 			 * currently using it.  If it is possible to suspend,
900 			 * that other partition will do just that, otherwise
901 			 * it'll happily send us to sleep.  In any case, when
902 			 * get_chip returns success we're clear to go ahead.
903 			 */
904 			ret = spin_trylock(contender->mutex);
905 			spin_unlock(&shared->lock);
906 			if (!ret)
907 				goto retry;
908 			spin_unlock(chip->mutex);
909 			ret = chip_ready(map, contender, contender->start, mode);
910 			spin_lock(chip->mutex);
911 
912 			if (ret == -EAGAIN) {
913 				spin_unlock(contender->mutex);
914 				goto retry;
915 			}
916 			if (ret) {
917 				spin_unlock(contender->mutex);
918 				return ret;
919 			}
920 			spin_lock(&shared->lock);
921 
922 			/* We should not own chip if it is already
923 			 * in FL_SYNCING state. Put contender and retry. */
924 			if (chip->state == FL_SYNCING) {
925 				put_chip(map, contender, contender->start);
926 				spin_unlock(contender->mutex);
927 				goto retry;
928 			}
929 			spin_unlock(contender->mutex);
930 		}
931 
932 		/* Check if we already have suspended erase
933 		 * on this chip. Sleep. */
934 		if (mode == FL_ERASING && shared->erasing
935 		    && shared->erasing->oldstate == FL_ERASING) {
936 			spin_unlock(&shared->lock);
937 			set_current_state(TASK_UNINTERRUPTIBLE);
938 			add_wait_queue(&chip->wq, &wait);
939 			spin_unlock(chip->mutex);
940 			schedule();
941 			remove_wait_queue(&chip->wq, &wait);
942 			spin_lock(chip->mutex);
943 			goto retry;
944 		}
945 
946 		/* We now own it */
947 		shared->writing = chip;
948 		if (mode == FL_ERASING)
949 			shared->erasing = chip;
950 		spin_unlock(&shared->lock);
951 	}
952 	ret = chip_ready(map, chip, adr, mode);
953 	if (ret == -EAGAIN)
954 		goto retry;
955 
956 	return ret;
957 }
958 
959 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
960 {
961 	struct cfi_private *cfi = map->fldrv_priv;
962 
963 	if (chip->priv) {
964 		struct flchip_shared *shared = chip->priv;
965 		spin_lock(&shared->lock);
966 		if (shared->writing == chip && chip->oldstate == FL_READY) {
967 			/* We own the ability to write, but we're done */
968 			shared->writing = shared->erasing;
969 			if (shared->writing && shared->writing != chip) {
970 				/* give back ownership to who we loaned it from */
971 				struct flchip *loaner = shared->writing;
972 				spin_lock(loaner->mutex);
973 				spin_unlock(&shared->lock);
974 				spin_unlock(chip->mutex);
975 				put_chip(map, loaner, loaner->start);
976 				spin_lock(chip->mutex);
977 				spin_unlock(loaner->mutex);
978 				wake_up(&chip->wq);
979 				return;
980 			}
981 			shared->erasing = NULL;
982 			shared->writing = NULL;
983 		} else if (shared->erasing == chip && shared->writing != chip) {
984 			/*
985 			 * We own the ability to erase without the ability
986 			 * to write, which means the erase was suspended
987 			 * and some other partition is currently writing.
988 			 * Don't let the switch below mess things up since
989 			 * we don't have ownership to resume anything.
990 			 */
991 			spin_unlock(&shared->lock);
992 			wake_up(&chip->wq);
993 			return;
994 		}
995 		spin_unlock(&shared->lock);
996 	}
997 
998 	switch(chip->oldstate) {
999 	case FL_ERASING:
1000 		chip->state = chip->oldstate;
1001 		/* What if one interleaved chip has finished and the
1002 		   other hasn't? The old code would leave the finished
1003 		   one in READY mode. That's bad, and caused -EROFS
1004 		   errors to be returned from do_erase_oneblock because
1005 		   that's the only bit it checked for at the time.
1006 		   As the state machine appears to explicitly allow
1007 		   sending the 0x70 (Read Status) command to an erasing
1008 		   chip and expecting it to be ignored, that's what we
1009 		   do. */
1010 		map_write(map, CMD(0xd0), adr);
1011 		map_write(map, CMD(0x70), adr);
1012 		chip->oldstate = FL_READY;
1013 		chip->state = FL_ERASING;
1014 		break;
1015 
1016 	case FL_XIP_WHILE_ERASING:
1017 		chip->state = chip->oldstate;
1018 		chip->oldstate = FL_READY;
1019 		break;
1020 
1021 	case FL_READY:
1022 	case FL_STATUS:
1023 	case FL_JEDEC_QUERY:
1024 		/* We should really make set_vpp() count, rather than doing this */
1025 		DISABLE_VPP(map);
1026 		break;
1027 	default:
1028 		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
1029 	}
1030 	wake_up(&chip->wq);
1031 }
1032 
1033 #ifdef CONFIG_MTD_XIP
1034 
1035 /*
1036  * No interrupt what so ever can be serviced while the flash isn't in array
1037  * mode.  This is ensured by the xip_disable() and xip_enable() functions
1038  * enclosing any code path where the flash is known not to be in array mode.
1039  * And within a XIP disabled code path, only functions marked with __xipram
1040  * may be called and nothing else (it's a good thing to inspect generated
1041  * assembly to make sure inline functions were actually inlined and that gcc
1042  * didn't emit calls to its own support functions). Also configuring MTD CFI
1043  * support to a single buswidth and a single interleave is also recommended.
1044  */
1045 
1046 static void xip_disable(struct map_info *map, struct flchip *chip,
1047 			unsigned long adr)
1048 {
1049 	/* TODO: chips with no XIP use should ignore and return */
1050 	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
1051 	local_irq_disable();
1052 }
1053 
1054 static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
1055 				unsigned long adr)
1056 {
1057 	struct cfi_private *cfi = map->fldrv_priv;
1058 	if (chip->state != FL_POINT && chip->state != FL_READY) {
1059 		map_write(map, CMD(0xff), adr);
1060 		chip->state = FL_READY;
1061 	}
1062 	(void) map_read(map, adr);
1063 	xip_iprefetch();
1064 	local_irq_enable();
1065 }
1066 
1067 /*
1068  * When a delay is required for the flash operation to complete, the
1069  * xip_wait_for_operation() function is polling for both the given timeout
1070  * and pending (but still masked) hardware interrupts.  Whenever there is an
1071  * interrupt pending then the flash erase or write operation is suspended,
1072  * array mode restored and interrupts unmasked.  Task scheduling might also
1073  * happen at that point.  The CPU eventually returns from the interrupt or
1074  * the call to schedule() and the suspended flash operation is resumed for
1075  * the remaining of the delay period.
1076  *
1077  * Warning: this function _will_ fool interrupt latency tracing tools.
1078  */
1079 
1080 static int __xipram xip_wait_for_operation(
1081 		struct map_info *map, struct flchip *chip,
1082 		unsigned long adr, unsigned int chip_op_time_max)
1083 {
1084 	struct cfi_private *cfi = map->fldrv_priv;
1085 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
1086 	map_word status, OK = CMD(0x80);
1087 	unsigned long usec, suspended, start, done;
1088 	flstate_t oldstate, newstate;
1089 
1090        	start = xip_currtime();
1091 	usec = chip_op_time_max;
1092 	if (usec == 0)
1093 		usec = 500000;
1094 	done = 0;
1095 
1096 	do {
1097 		cpu_relax();
1098 		if (xip_irqpending() && cfip &&
1099 		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
1100 		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
1101 		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
1102 			/*
1103 			 * Let's suspend the erase or write operation when
1104 			 * supported.  Note that we currently don't try to
1105 			 * suspend interleaved chips if there is already
1106 			 * another operation suspended (imagine what happens
1107 			 * when one chip was already done with the current
1108 			 * operation while another chip suspended it, then
1109 			 * we resume the whole thing at once).  Yes, it
1110 			 * can happen!
1111 			 */
1112 			usec -= done;
1113 			map_write(map, CMD(0xb0), adr);
1114 			map_write(map, CMD(0x70), adr);
1115 			suspended = xip_currtime();
1116 			do {
1117 				if (xip_elapsed_since(suspended) > 100000) {
1118 					/*
1119 					 * The chip doesn't want to suspend
1120 					 * after waiting for 100 msecs.
1121 					 * This is a critical error but there
1122 					 * is not much we can do here.
1123 					 */
1124 					return -EIO;
1125 				}
1126 				status = map_read(map, adr);
1127 			} while (!map_word_andequal(map, status, OK, OK));
1128 
1129 			/* Suspend succeeded */
1130 			oldstate = chip->state;
1131 			if (oldstate == FL_ERASING) {
1132 				if (!map_word_bitsset(map, status, CMD(0x40)))
1133 					break;
1134 				newstate = FL_XIP_WHILE_ERASING;
1135 				chip->erase_suspended = 1;
1136 			} else {
1137 				if (!map_word_bitsset(map, status, CMD(0x04)))
1138 					break;
1139 				newstate = FL_XIP_WHILE_WRITING;
1140 				chip->write_suspended = 1;
1141 			}
1142 			chip->state = newstate;
1143 			map_write(map, CMD(0xff), adr);
1144 			(void) map_read(map, adr);
1145 			xip_iprefetch();
1146 			local_irq_enable();
1147 			spin_unlock(chip->mutex);
1148 			xip_iprefetch();
1149 			cond_resched();
1150 
1151 			/*
1152 			 * We're back.  However someone else might have
1153 			 * decided to go write to the chip if we are in
1154 			 * a suspended erase state.  If so let's wait
1155 			 * until it's done.
1156 			 */
1157 			spin_lock(chip->mutex);
1158 			while (chip->state != newstate) {
1159 				DECLARE_WAITQUEUE(wait, current);
1160 				set_current_state(TASK_UNINTERRUPTIBLE);
1161 				add_wait_queue(&chip->wq, &wait);
1162 				spin_unlock(chip->mutex);
1163 				schedule();
1164 				remove_wait_queue(&chip->wq, &wait);
1165 				spin_lock(chip->mutex);
1166 			}
1167 			/* Disallow XIP again */
1168 			local_irq_disable();
1169 
1170 			/* Resume the write or erase operation */
1171 			map_write(map, CMD(0xd0), adr);
1172 			map_write(map, CMD(0x70), adr);
1173 			chip->state = oldstate;
1174 			start = xip_currtime();
1175 		} else if (usec >= 1000000/HZ) {
1176 			/*
1177 			 * Try to save on CPU power when waiting delay
1178 			 * is at least a system timer tick period.
1179 			 * No need to be extremely accurate here.
1180 			 */
1181 			xip_cpu_idle();
1182 		}
1183 		status = map_read(map, adr);
1184 		done = xip_elapsed_since(start);
1185 	} while (!map_word_andequal(map, status, OK, OK)
1186 		 && done < usec);
1187 
1188 	return (done >= usec) ? -ETIME : 0;
1189 }
1190 
1191 /*
1192  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1193  * the flash is actively programming or erasing since we have to poll for
1194  * the operation to complete anyway.  We can't do that in a generic way with
1195  * a XIP setup so do it before the actual flash operation in this case
1196  * and stub it out from INVAL_CACHE_AND_WAIT.
1197  */
1198 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1199 	INVALIDATE_CACHED_RANGE(map, from, size)
1200 
1201 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1202 	xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1203 
1204 #else
1205 
1206 #define xip_disable(map, chip, adr)
1207 #define xip_enable(map, chip, adr)
1208 #define XIP_INVAL_CACHED_RANGE(x...)
1209 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1210 
1211 static int inval_cache_and_wait_for_operation(
1212 		struct map_info *map, struct flchip *chip,
1213 		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1214 		unsigned int chip_op_time, unsigned int chip_op_time_max)
1215 {
1216 	struct cfi_private *cfi = map->fldrv_priv;
1217 	map_word status, status_OK = CMD(0x80);
1218 	int chip_state = chip->state;
1219 	unsigned int timeo, sleep_time, reset_timeo;
1220 
1221 	spin_unlock(chip->mutex);
1222 	if (inval_len)
1223 		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1224 	spin_lock(chip->mutex);
1225 
1226 	timeo = chip_op_time_max;
1227 	if (!timeo)
1228 		timeo = 500000;
1229 	reset_timeo = timeo;
1230 	sleep_time = chip_op_time / 2;
1231 
1232 	for (;;) {
1233 		status = map_read(map, cmd_adr);
1234 		if (map_word_andequal(map, status, status_OK, status_OK))
1235 			break;
1236 
1237 		if (!timeo) {
1238 			map_write(map, CMD(0x70), cmd_adr);
1239 			chip->state = FL_STATUS;
1240 			return -ETIME;
1241 		}
1242 
1243 		/* OK Still waiting. Drop the lock, wait a while and retry. */
1244 		spin_unlock(chip->mutex);
1245 		if (sleep_time >= 1000000/HZ) {
1246 			/*
1247 			 * Half of the normal delay still remaining
1248 			 * can be performed with a sleeping delay instead
1249 			 * of busy waiting.
1250 			 */
1251 			msleep(sleep_time/1000);
1252 			timeo -= sleep_time;
1253 			sleep_time = 1000000/HZ;
1254 		} else {
1255 			udelay(1);
1256 			cond_resched();
1257 			timeo--;
1258 		}
1259 		spin_lock(chip->mutex);
1260 
1261 		while (chip->state != chip_state) {
1262 			/* Someone's suspended the operation: sleep */
1263 			DECLARE_WAITQUEUE(wait, current);
1264 			set_current_state(TASK_UNINTERRUPTIBLE);
1265 			add_wait_queue(&chip->wq, &wait);
1266 			spin_unlock(chip->mutex);
1267 			schedule();
1268 			remove_wait_queue(&chip->wq, &wait);
1269 			spin_lock(chip->mutex);
1270 		}
1271 		if (chip->erase_suspended && chip_state == FL_ERASING)  {
1272 			/* Erase suspend occured while sleep: reset timeout */
1273 			timeo = reset_timeo;
1274 			chip->erase_suspended = 0;
1275 		}
1276 		if (chip->write_suspended && chip_state == FL_WRITING)  {
1277 			/* Write suspend occured while sleep: reset timeout */
1278 			timeo = reset_timeo;
1279 			chip->write_suspended = 0;
1280 		}
1281 	}
1282 
1283 	/* Done and happy. */
1284  	chip->state = FL_STATUS;
1285 	return 0;
1286 }
1287 
1288 #endif
1289 
1290 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1291 	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
1292 
1293 
1294 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1295 {
1296 	unsigned long cmd_addr;
1297 	struct cfi_private *cfi = map->fldrv_priv;
1298 	int ret = 0;
1299 
1300 	adr += chip->start;
1301 
1302 	/* Ensure cmd read/writes are aligned. */
1303 	cmd_addr = adr & ~(map_bankwidth(map)-1);
1304 
1305 	spin_lock(chip->mutex);
1306 
1307 	ret = get_chip(map, chip, cmd_addr, FL_POINT);
1308 
1309 	if (!ret) {
1310 		if (chip->state != FL_POINT && chip->state != FL_READY)
1311 			map_write(map, CMD(0xff), cmd_addr);
1312 
1313 		chip->state = FL_POINT;
1314 		chip->ref_point_counter++;
1315 	}
1316 	spin_unlock(chip->mutex);
1317 
1318 	return ret;
1319 }
1320 
1321 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1322 		size_t *retlen, void **virt, resource_size_t *phys)
1323 {
1324 	struct map_info *map = mtd->priv;
1325 	struct cfi_private *cfi = map->fldrv_priv;
1326 	unsigned long ofs, last_end = 0;
1327 	int chipnum;
1328 	int ret = 0;
1329 
1330 	if (!map->virt || (from + len > mtd->size))
1331 		return -EINVAL;
1332 
1333 	/* Now lock the chip(s) to POINT state */
1334 
1335 	/* ofs: offset within the first chip that the first read should start */
1336 	chipnum = (from >> cfi->chipshift);
1337 	ofs = from - (chipnum << cfi->chipshift);
1338 
1339 	*virt = map->virt + cfi->chips[chipnum].start + ofs;
1340 	*retlen = 0;
1341 	if (phys)
1342 		*phys = map->phys + cfi->chips[chipnum].start + ofs;
1343 
1344 	while (len) {
1345 		unsigned long thislen;
1346 
1347 		if (chipnum >= cfi->numchips)
1348 			break;
1349 
1350 		/* We cannot point across chips that are virtually disjoint */
1351 		if (!last_end)
1352 			last_end = cfi->chips[chipnum].start;
1353 		else if (cfi->chips[chipnum].start != last_end)
1354 			break;
1355 
1356 		if ((len + ofs -1) >> cfi->chipshift)
1357 			thislen = (1<<cfi->chipshift) - ofs;
1358 		else
1359 			thislen = len;
1360 
1361 		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1362 		if (ret)
1363 			break;
1364 
1365 		*retlen += thislen;
1366 		len -= thislen;
1367 
1368 		ofs = 0;
1369 		last_end += 1 << cfi->chipshift;
1370 		chipnum++;
1371 	}
1372 	return 0;
1373 }
1374 
1375 static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1376 {
1377 	struct map_info *map = mtd->priv;
1378 	struct cfi_private *cfi = map->fldrv_priv;
1379 	unsigned long ofs;
1380 	int chipnum;
1381 
1382 	/* Now unlock the chip(s) POINT state */
1383 
1384 	/* ofs: offset within the first chip that the first read should start */
1385 	chipnum = (from >> cfi->chipshift);
1386 	ofs = from - (chipnum <<  cfi->chipshift);
1387 
1388 	while (len) {
1389 		unsigned long thislen;
1390 		struct flchip *chip;
1391 
1392 		chip = &cfi->chips[chipnum];
1393 		if (chipnum >= cfi->numchips)
1394 			break;
1395 
1396 		if ((len + ofs -1) >> cfi->chipshift)
1397 			thislen = (1<<cfi->chipshift) - ofs;
1398 		else
1399 			thislen = len;
1400 
1401 		spin_lock(chip->mutex);
1402 		if (chip->state == FL_POINT) {
1403 			chip->ref_point_counter--;
1404 			if(chip->ref_point_counter == 0)
1405 				chip->state = FL_READY;
1406 		} else
1407 			printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
1408 
1409 		put_chip(map, chip, chip->start);
1410 		spin_unlock(chip->mutex);
1411 
1412 		len -= thislen;
1413 		ofs = 0;
1414 		chipnum++;
1415 	}
1416 }
1417 
1418 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1419 {
1420 	unsigned long cmd_addr;
1421 	struct cfi_private *cfi = map->fldrv_priv;
1422 	int ret;
1423 
1424 	adr += chip->start;
1425 
1426 	/* Ensure cmd read/writes are aligned. */
1427 	cmd_addr = adr & ~(map_bankwidth(map)-1);
1428 
1429 	spin_lock(chip->mutex);
1430 	ret = get_chip(map, chip, cmd_addr, FL_READY);
1431 	if (ret) {
1432 		spin_unlock(chip->mutex);
1433 		return ret;
1434 	}
1435 
1436 	if (chip->state != FL_POINT && chip->state != FL_READY) {
1437 		map_write(map, CMD(0xff), cmd_addr);
1438 
1439 		chip->state = FL_READY;
1440 	}
1441 
1442 	map_copy_from(map, buf, adr, len);
1443 
1444 	put_chip(map, chip, cmd_addr);
1445 
1446 	spin_unlock(chip->mutex);
1447 	return 0;
1448 }
1449 
1450 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1451 {
1452 	struct map_info *map = mtd->priv;
1453 	struct cfi_private *cfi = map->fldrv_priv;
1454 	unsigned long ofs;
1455 	int chipnum;
1456 	int ret = 0;
1457 
1458 	/* ofs: offset within the first chip that the first read should start */
1459 	chipnum = (from >> cfi->chipshift);
1460 	ofs = from - (chipnum <<  cfi->chipshift);
1461 
1462 	*retlen = 0;
1463 
1464 	while (len) {
1465 		unsigned long thislen;
1466 
1467 		if (chipnum >= cfi->numchips)
1468 			break;
1469 
1470 		if ((len + ofs -1) >> cfi->chipshift)
1471 			thislen = (1<<cfi->chipshift) - ofs;
1472 		else
1473 			thislen = len;
1474 
1475 		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1476 		if (ret)
1477 			break;
1478 
1479 		*retlen += thislen;
1480 		len -= thislen;
1481 		buf += thislen;
1482 
1483 		ofs = 0;
1484 		chipnum++;
1485 	}
1486 	return ret;
1487 }
1488 
1489 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1490 				     unsigned long adr, map_word datum, int mode)
1491 {
1492 	struct cfi_private *cfi = map->fldrv_priv;
1493 	map_word status, write_cmd;
1494 	int ret=0;
1495 
1496 	adr += chip->start;
1497 
1498 	switch (mode) {
1499 	case FL_WRITING:
1500 		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1501 		break;
1502 	case FL_OTP_WRITE:
1503 		write_cmd = CMD(0xc0);
1504 		break;
1505 	default:
1506 		return -EINVAL;
1507 	}
1508 
1509 	spin_lock(chip->mutex);
1510 	ret = get_chip(map, chip, adr, mode);
1511 	if (ret) {
1512 		spin_unlock(chip->mutex);
1513 		return ret;
1514 	}
1515 
1516 	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1517 	ENABLE_VPP(map);
1518 	xip_disable(map, chip, adr);
1519 	map_write(map, write_cmd, adr);
1520 	map_write(map, datum, adr);
1521 	chip->state = mode;
1522 
1523 	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1524 				   adr, map_bankwidth(map),
1525 				   chip->word_write_time,
1526 				   chip->word_write_time_max);
1527 	if (ret) {
1528 		xip_enable(map, chip, adr);
1529 		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1530 		goto out;
1531 	}
1532 
1533 	/* check for errors */
1534 	status = map_read(map, adr);
1535 	if (map_word_bitsset(map, status, CMD(0x1a))) {
1536 		unsigned long chipstatus = MERGESTATUS(status);
1537 
1538 		/* reset status */
1539 		map_write(map, CMD(0x50), adr);
1540 		map_write(map, CMD(0x70), adr);
1541 		xip_enable(map, chip, adr);
1542 
1543 		if (chipstatus & 0x02) {
1544 			ret = -EROFS;
1545 		} else if (chipstatus & 0x08) {
1546 			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1547 			ret = -EIO;
1548 		} else {
1549 			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1550 			ret = -EINVAL;
1551 		}
1552 
1553 		goto out;
1554 	}
1555 
1556 	xip_enable(map, chip, adr);
1557  out:	put_chip(map, chip, adr);
1558 	spin_unlock(chip->mutex);
1559 	return ret;
1560 }
1561 
1562 
1563 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1564 {
1565 	struct map_info *map = mtd->priv;
1566 	struct cfi_private *cfi = map->fldrv_priv;
1567 	int ret = 0;
1568 	int chipnum;
1569 	unsigned long ofs;
1570 
1571 	*retlen = 0;
1572 	if (!len)
1573 		return 0;
1574 
1575 	chipnum = to >> cfi->chipshift;
1576 	ofs = to  - (chipnum << cfi->chipshift);
1577 
1578 	/* If it's not bus-aligned, do the first byte write */
1579 	if (ofs & (map_bankwidth(map)-1)) {
1580 		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1581 		int gap = ofs - bus_ofs;
1582 		int n;
1583 		map_word datum;
1584 
1585 		n = min_t(int, len, map_bankwidth(map)-gap);
1586 		datum = map_word_ff(map);
1587 		datum = map_word_load_partial(map, datum, buf, gap, n);
1588 
1589 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1590 					       bus_ofs, datum, FL_WRITING);
1591 		if (ret)
1592 			return ret;
1593 
1594 		len -= n;
1595 		ofs += n;
1596 		buf += n;
1597 		(*retlen) += n;
1598 
1599 		if (ofs >> cfi->chipshift) {
1600 			chipnum ++;
1601 			ofs = 0;
1602 			if (chipnum == cfi->numchips)
1603 				return 0;
1604 		}
1605 	}
1606 
1607 	while(len >= map_bankwidth(map)) {
1608 		map_word datum = map_word_load(map, buf);
1609 
1610 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1611 				       ofs, datum, FL_WRITING);
1612 		if (ret)
1613 			return ret;
1614 
1615 		ofs += map_bankwidth(map);
1616 		buf += map_bankwidth(map);
1617 		(*retlen) += map_bankwidth(map);
1618 		len -= map_bankwidth(map);
1619 
1620 		if (ofs >> cfi->chipshift) {
1621 			chipnum ++;
1622 			ofs = 0;
1623 			if (chipnum == cfi->numchips)
1624 				return 0;
1625 		}
1626 	}
1627 
1628 	if (len & (map_bankwidth(map)-1)) {
1629 		map_word datum;
1630 
1631 		datum = map_word_ff(map);
1632 		datum = map_word_load_partial(map, datum, buf, 0, len);
1633 
1634 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1635 				       ofs, datum, FL_WRITING);
1636 		if (ret)
1637 			return ret;
1638 
1639 		(*retlen) += len;
1640 	}
1641 
1642 	return 0;
1643 }
1644 
1645 
1646 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1647 				    unsigned long adr, const struct kvec **pvec,
1648 				    unsigned long *pvec_seek, int len)
1649 {
1650 	struct cfi_private *cfi = map->fldrv_priv;
1651 	map_word status, write_cmd, datum;
1652 	unsigned long cmd_adr;
1653 	int ret, wbufsize, word_gap, words;
1654 	const struct kvec *vec;
1655 	unsigned long vec_seek;
1656 	unsigned long initial_adr;
1657 	int initial_len = len;
1658 
1659 	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1660 	adr += chip->start;
1661 	initial_adr = adr;
1662 	cmd_adr = adr & ~(wbufsize-1);
1663 
1664 	/* Let's determine this according to the interleave only once */
1665 	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1666 
1667 	spin_lock(chip->mutex);
1668 	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1669 	if (ret) {
1670 		spin_unlock(chip->mutex);
1671 		return ret;
1672 	}
1673 
1674 	XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1675 	ENABLE_VPP(map);
1676 	xip_disable(map, chip, cmd_adr);
1677 
1678 	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1679 	   [...], the device will not accept any more Write to Buffer commands".
1680 	   So we must check here and reset those bits if they're set. Otherwise
1681 	   we're just pissing in the wind */
1682 	if (chip->state != FL_STATUS) {
1683 		map_write(map, CMD(0x70), cmd_adr);
1684 		chip->state = FL_STATUS;
1685 	}
1686 	status = map_read(map, cmd_adr);
1687 	if (map_word_bitsset(map, status, CMD(0x30))) {
1688 		xip_enable(map, chip, cmd_adr);
1689 		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1690 		xip_disable(map, chip, cmd_adr);
1691 		map_write(map, CMD(0x50), cmd_adr);
1692 		map_write(map, CMD(0x70), cmd_adr);
1693 	}
1694 
1695 	chip->state = FL_WRITING_TO_BUFFER;
1696 	map_write(map, write_cmd, cmd_adr);
1697 	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1698 	if (ret) {
1699 		/* Argh. Not ready for write to buffer */
1700 		map_word Xstatus = map_read(map, cmd_adr);
1701 		map_write(map, CMD(0x70), cmd_adr);
1702 		chip->state = FL_STATUS;
1703 		status = map_read(map, cmd_adr);
1704 		map_write(map, CMD(0x50), cmd_adr);
1705 		map_write(map, CMD(0x70), cmd_adr);
1706 		xip_enable(map, chip, cmd_adr);
1707 		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1708 				map->name, Xstatus.x[0], status.x[0]);
1709 		goto out;
1710 	}
1711 
1712 	/* Figure out the number of words to write */
1713 	word_gap = (-adr & (map_bankwidth(map)-1));
1714 	words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1715 	if (!word_gap) {
1716 		words--;
1717 	} else {
1718 		word_gap = map_bankwidth(map) - word_gap;
1719 		adr -= word_gap;
1720 		datum = map_word_ff(map);
1721 	}
1722 
1723 	/* Write length of data to come */
1724 	map_write(map, CMD(words), cmd_adr );
1725 
1726 	/* Write data */
1727 	vec = *pvec;
1728 	vec_seek = *pvec_seek;
1729 	do {
1730 		int n = map_bankwidth(map) - word_gap;
1731 		if (n > vec->iov_len - vec_seek)
1732 			n = vec->iov_len - vec_seek;
1733 		if (n > len)
1734 			n = len;
1735 
1736 		if (!word_gap && len < map_bankwidth(map))
1737 			datum = map_word_ff(map);
1738 
1739 		datum = map_word_load_partial(map, datum,
1740 					      vec->iov_base + vec_seek,
1741 					      word_gap, n);
1742 
1743 		len -= n;
1744 		word_gap += n;
1745 		if (!len || word_gap == map_bankwidth(map)) {
1746 			map_write(map, datum, adr);
1747 			adr += map_bankwidth(map);
1748 			word_gap = 0;
1749 		}
1750 
1751 		vec_seek += n;
1752 		if (vec_seek == vec->iov_len) {
1753 			vec++;
1754 			vec_seek = 0;
1755 		}
1756 	} while (len);
1757 	*pvec = vec;
1758 	*pvec_seek = vec_seek;
1759 
1760 	/* GO GO GO */
1761 	map_write(map, CMD(0xd0), cmd_adr);
1762 	chip->state = FL_WRITING;
1763 
1764 	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1765 				   initial_adr, initial_len,
1766 				   chip->buffer_write_time,
1767 				   chip->buffer_write_time_max);
1768 	if (ret) {
1769 		map_write(map, CMD(0x70), cmd_adr);
1770 		chip->state = FL_STATUS;
1771 		xip_enable(map, chip, cmd_adr);
1772 		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1773 		goto out;
1774 	}
1775 
1776 	/* check for errors */
1777 	status = map_read(map, cmd_adr);
1778 	if (map_word_bitsset(map, status, CMD(0x1a))) {
1779 		unsigned long chipstatus = MERGESTATUS(status);
1780 
1781 		/* reset status */
1782 		map_write(map, CMD(0x50), cmd_adr);
1783 		map_write(map, CMD(0x70), cmd_adr);
1784 		xip_enable(map, chip, cmd_adr);
1785 
1786 		if (chipstatus & 0x02) {
1787 			ret = -EROFS;
1788 		} else if (chipstatus & 0x08) {
1789 			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1790 			ret = -EIO;
1791 		} else {
1792 			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1793 			ret = -EINVAL;
1794 		}
1795 
1796 		goto out;
1797 	}
1798 
1799 	xip_enable(map, chip, cmd_adr);
1800  out:	put_chip(map, chip, cmd_adr);
1801 	spin_unlock(chip->mutex);
1802 	return ret;
1803 }
1804 
1805 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1806 				unsigned long count, loff_t to, size_t *retlen)
1807 {
1808 	struct map_info *map = mtd->priv;
1809 	struct cfi_private *cfi = map->fldrv_priv;
1810 	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1811 	int ret = 0;
1812 	int chipnum;
1813 	unsigned long ofs, vec_seek, i;
1814 	size_t len = 0;
1815 
1816 	for (i = 0; i < count; i++)
1817 		len += vecs[i].iov_len;
1818 
1819 	*retlen = 0;
1820 	if (!len)
1821 		return 0;
1822 
1823 	chipnum = to >> cfi->chipshift;
1824 	ofs = to - (chipnum << cfi->chipshift);
1825 	vec_seek = 0;
1826 
1827 	do {
1828 		/* We must not cross write block boundaries */
1829 		int size = wbufsize - (ofs & (wbufsize-1));
1830 
1831 		if (size > len)
1832 			size = len;
1833 		ret = do_write_buffer(map, &cfi->chips[chipnum],
1834 				      ofs, &vecs, &vec_seek, size);
1835 		if (ret)
1836 			return ret;
1837 
1838 		ofs += size;
1839 		(*retlen) += size;
1840 		len -= size;
1841 
1842 		if (ofs >> cfi->chipshift) {
1843 			chipnum ++;
1844 			ofs = 0;
1845 			if (chipnum == cfi->numchips)
1846 				return 0;
1847 		}
1848 
1849 		/* Be nice and reschedule with the chip in a usable state for other
1850 		   processes. */
1851 		cond_resched();
1852 
1853 	} while (len);
1854 
1855 	return 0;
1856 }
1857 
1858 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1859 				       size_t len, size_t *retlen, const u_char *buf)
1860 {
1861 	struct kvec vec;
1862 
1863 	vec.iov_base = (void *) buf;
1864 	vec.iov_len = len;
1865 
1866 	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1867 }
1868 
1869 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1870 				      unsigned long adr, int len, void *thunk)
1871 {
1872 	struct cfi_private *cfi = map->fldrv_priv;
1873 	map_word status;
1874 	int retries = 3;
1875 	int ret;
1876 
1877 	adr += chip->start;
1878 
1879  retry:
1880 	spin_lock(chip->mutex);
1881 	ret = get_chip(map, chip, adr, FL_ERASING);
1882 	if (ret) {
1883 		spin_unlock(chip->mutex);
1884 		return ret;
1885 	}
1886 
1887 	XIP_INVAL_CACHED_RANGE(map, adr, len);
1888 	ENABLE_VPP(map);
1889 	xip_disable(map, chip, adr);
1890 
1891 	/* Clear the status register first */
1892 	map_write(map, CMD(0x50), adr);
1893 
1894 	/* Now erase */
1895 	map_write(map, CMD(0x20), adr);
1896 	map_write(map, CMD(0xD0), adr);
1897 	chip->state = FL_ERASING;
1898 	chip->erase_suspended = 0;
1899 
1900 	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1901 				   adr, len,
1902 				   chip->erase_time,
1903 				   chip->erase_time_max);
1904 	if (ret) {
1905 		map_write(map, CMD(0x70), adr);
1906 		chip->state = FL_STATUS;
1907 		xip_enable(map, chip, adr);
1908 		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1909 		goto out;
1910 	}
1911 
1912 	/* We've broken this before. It doesn't hurt to be safe */
1913 	map_write(map, CMD(0x70), adr);
1914 	chip->state = FL_STATUS;
1915 	status = map_read(map, adr);
1916 
1917 	/* check for errors */
1918 	if (map_word_bitsset(map, status, CMD(0x3a))) {
1919 		unsigned long chipstatus = MERGESTATUS(status);
1920 
1921 		/* Reset the error bits */
1922 		map_write(map, CMD(0x50), adr);
1923 		map_write(map, CMD(0x70), adr);
1924 		xip_enable(map, chip, adr);
1925 
1926 		if ((chipstatus & 0x30) == 0x30) {
1927 			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1928 			ret = -EINVAL;
1929 		} else if (chipstatus & 0x02) {
1930 			/* Protection bit set */
1931 			ret = -EROFS;
1932 		} else if (chipstatus & 0x8) {
1933 			/* Voltage */
1934 			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1935 			ret = -EIO;
1936 		} else if (chipstatus & 0x20 && retries--) {
1937 			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1938 			put_chip(map, chip, adr);
1939 			spin_unlock(chip->mutex);
1940 			goto retry;
1941 		} else {
1942 			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1943 			ret = -EIO;
1944 		}
1945 
1946 		goto out;
1947 	}
1948 
1949 	xip_enable(map, chip, adr);
1950  out:	put_chip(map, chip, adr);
1951 	spin_unlock(chip->mutex);
1952 	return ret;
1953 }
1954 
1955 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1956 {
1957 	unsigned long ofs, len;
1958 	int ret;
1959 
1960 	ofs = instr->addr;
1961 	len = instr->len;
1962 
1963 	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1964 	if (ret)
1965 		return ret;
1966 
1967 	instr->state = MTD_ERASE_DONE;
1968 	mtd_erase_callback(instr);
1969 
1970 	return 0;
1971 }
1972 
1973 static void cfi_intelext_sync (struct mtd_info *mtd)
1974 {
1975 	struct map_info *map = mtd->priv;
1976 	struct cfi_private *cfi = map->fldrv_priv;
1977 	int i;
1978 	struct flchip *chip;
1979 	int ret = 0;
1980 
1981 	for (i=0; !ret && i<cfi->numchips; i++) {
1982 		chip = &cfi->chips[i];
1983 
1984 		spin_lock(chip->mutex);
1985 		ret = get_chip(map, chip, chip->start, FL_SYNCING);
1986 
1987 		if (!ret) {
1988 			chip->oldstate = chip->state;
1989 			chip->state = FL_SYNCING;
1990 			/* No need to wake_up() on this state change -
1991 			 * as the whole point is that nobody can do anything
1992 			 * with the chip now anyway.
1993 			 */
1994 		}
1995 		spin_unlock(chip->mutex);
1996 	}
1997 
1998 	/* Unlock the chips again */
1999 
2000 	for (i--; i >=0; i--) {
2001 		chip = &cfi->chips[i];
2002 
2003 		spin_lock(chip->mutex);
2004 
2005 		if (chip->state == FL_SYNCING) {
2006 			chip->state = chip->oldstate;
2007 			chip->oldstate = FL_READY;
2008 			wake_up(&chip->wq);
2009 		}
2010 		spin_unlock(chip->mutex);
2011 	}
2012 }
2013 
2014 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2015 						struct flchip *chip,
2016 						unsigned long adr,
2017 						int len, void *thunk)
2018 {
2019 	struct cfi_private *cfi = map->fldrv_priv;
2020 	int status, ofs_factor = cfi->interleave * cfi->device_type;
2021 
2022 	adr += chip->start;
2023 	xip_disable(map, chip, adr+(2*ofs_factor));
2024 	map_write(map, CMD(0x90), adr+(2*ofs_factor));
2025 	chip->state = FL_JEDEC_QUERY;
2026 	status = cfi_read_query(map, adr+(2*ofs_factor));
2027 	xip_enable(map, chip, 0);
2028 	return status;
2029 }
2030 
2031 #ifdef DEBUG_LOCK_BITS
2032 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2033 						struct flchip *chip,
2034 						unsigned long adr,
2035 						int len, void *thunk)
2036 {
2037 	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2038 	       adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2039 	return 0;
2040 }
2041 #endif
2042 
2043 #define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
2044 #define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)
2045 
2046 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2047 				       unsigned long adr, int len, void *thunk)
2048 {
2049 	struct cfi_private *cfi = map->fldrv_priv;
2050 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2051 	int udelay;
2052 	int ret;
2053 
2054 	adr += chip->start;
2055 
2056 	spin_lock(chip->mutex);
2057 	ret = get_chip(map, chip, adr, FL_LOCKING);
2058 	if (ret) {
2059 		spin_unlock(chip->mutex);
2060 		return ret;
2061 	}
2062 
2063 	ENABLE_VPP(map);
2064 	xip_disable(map, chip, adr);
2065 
2066 	map_write(map, CMD(0x60), adr);
2067 	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2068 		map_write(map, CMD(0x01), adr);
2069 		chip->state = FL_LOCKING;
2070 	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2071 		map_write(map, CMD(0xD0), adr);
2072 		chip->state = FL_UNLOCKING;
2073 	} else
2074 		BUG();
2075 
2076 	/*
2077 	 * If Instant Individual Block Locking supported then no need
2078 	 * to delay.
2079 	 */
2080 	udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
2081 
2082 	ret = WAIT_TIMEOUT(map, chip, adr, udelay, udelay * 100);
2083 	if (ret) {
2084 		map_write(map, CMD(0x70), adr);
2085 		chip->state = FL_STATUS;
2086 		xip_enable(map, chip, adr);
2087 		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
2088 		goto out;
2089 	}
2090 
2091 	xip_enable(map, chip, adr);
2092 out:	put_chip(map, chip, adr);
2093 	spin_unlock(chip->mutex);
2094 	return ret;
2095 }
2096 
2097 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2098 {
2099 	int ret;
2100 
2101 #ifdef DEBUG_LOCK_BITS
2102 	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2103 	       __func__, ofs, len);
2104 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2105 		ofs, len, NULL);
2106 #endif
2107 
2108 	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2109 		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2110 
2111 #ifdef DEBUG_LOCK_BITS
2112 	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2113 	       __func__, ret);
2114 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2115 		ofs, len, NULL);
2116 #endif
2117 
2118 	return ret;
2119 }
2120 
2121 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2122 {
2123 	int ret;
2124 
2125 #ifdef DEBUG_LOCK_BITS
2126 	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2127 	       __func__, ofs, len);
2128 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2129 		ofs, len, NULL);
2130 #endif
2131 
2132 	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2133 					ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2134 
2135 #ifdef DEBUG_LOCK_BITS
2136 	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2137 	       __func__, ret);
2138 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2139 		ofs, len, NULL);
2140 #endif
2141 
2142 	return ret;
2143 }
2144 
2145 #ifdef CONFIG_MTD_OTP
2146 
2147 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2148 			u_long data_offset, u_char *buf, u_int size,
2149 			u_long prot_offset, u_int groupno, u_int groupsize);
2150 
2151 static int __xipram
2152 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2153 	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2154 {
2155 	struct cfi_private *cfi = map->fldrv_priv;
2156 	int ret;
2157 
2158 	spin_lock(chip->mutex);
2159 	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2160 	if (ret) {
2161 		spin_unlock(chip->mutex);
2162 		return ret;
2163 	}
2164 
2165 	/* let's ensure we're not reading back cached data from array mode */
2166 	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2167 
2168 	xip_disable(map, chip, chip->start);
2169 	if (chip->state != FL_JEDEC_QUERY) {
2170 		map_write(map, CMD(0x90), chip->start);
2171 		chip->state = FL_JEDEC_QUERY;
2172 	}
2173 	map_copy_from(map, buf, chip->start + offset, size);
2174 	xip_enable(map, chip, chip->start);
2175 
2176 	/* then ensure we don't keep OTP data in the cache */
2177 	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2178 
2179 	put_chip(map, chip, chip->start);
2180 	spin_unlock(chip->mutex);
2181 	return 0;
2182 }
2183 
2184 static int
2185 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2186 	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2187 {
2188 	int ret;
2189 
2190 	while (size) {
2191 		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2192 		int gap = offset - bus_ofs;
2193 		int n = min_t(int, size, map_bankwidth(map)-gap);
2194 		map_word datum = map_word_ff(map);
2195 
2196 		datum = map_word_load_partial(map, datum, buf, gap, n);
2197 		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2198 		if (ret)
2199 			return ret;
2200 
2201 		offset += n;
2202 		buf += n;
2203 		size -= n;
2204 	}
2205 
2206 	return 0;
2207 }
2208 
2209 static int
2210 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2211 	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2212 {
2213 	struct cfi_private *cfi = map->fldrv_priv;
2214 	map_word datum;
2215 
2216 	/* make sure area matches group boundaries */
2217 	if (size != grpsz)
2218 		return -EXDEV;
2219 
2220 	datum = map_word_ff(map);
2221 	datum = map_word_clr(map, datum, CMD(1 << grpno));
2222 	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2223 }
2224 
2225 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2226 				 size_t *retlen, u_char *buf,
2227 				 otp_op_t action, int user_regs)
2228 {
2229 	struct map_info *map = mtd->priv;
2230 	struct cfi_private *cfi = map->fldrv_priv;
2231 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2232 	struct flchip *chip;
2233 	struct cfi_intelext_otpinfo *otp;
2234 	u_long devsize, reg_prot_offset, data_offset;
2235 	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2236 	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2237 	int ret;
2238 
2239 	*retlen = 0;
2240 
2241 	/* Check that we actually have some OTP registers */
2242 	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2243 		return -ENODATA;
2244 
2245 	/* we need real chips here not virtual ones */
2246 	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2247 	chip_step = devsize >> cfi->chipshift;
2248 	chip_num = 0;
2249 
2250 	/* Some chips have OTP located in the _top_ partition only.
2251 	   For example: Intel 28F256L18T (T means top-parameter device) */
2252 	if (cfi->mfr == CFI_MFR_INTEL) {
2253 		switch (cfi->id) {
2254 		case 0x880b:
2255 		case 0x880c:
2256 		case 0x880d:
2257 			chip_num = chip_step - 1;
2258 		}
2259 	}
2260 
2261 	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2262 		chip = &cfi->chips[chip_num];
2263 		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2264 
2265 		/* first OTP region */
2266 		field = 0;
2267 		reg_prot_offset = extp->ProtRegAddr;
2268 		reg_fact_groups = 1;
2269 		reg_fact_size = 1 << extp->FactProtRegSize;
2270 		reg_user_groups = 1;
2271 		reg_user_size = 1 << extp->UserProtRegSize;
2272 
2273 		while (len > 0) {
2274 			/* flash geometry fixup */
2275 			data_offset = reg_prot_offset + 1;
2276 			data_offset *= cfi->interleave * cfi->device_type;
2277 			reg_prot_offset *= cfi->interleave * cfi->device_type;
2278 			reg_fact_size *= cfi->interleave;
2279 			reg_user_size *= cfi->interleave;
2280 
2281 			if (user_regs) {
2282 				groups = reg_user_groups;
2283 				groupsize = reg_user_size;
2284 				/* skip over factory reg area */
2285 				groupno = reg_fact_groups;
2286 				data_offset += reg_fact_groups * reg_fact_size;
2287 			} else {
2288 				groups = reg_fact_groups;
2289 				groupsize = reg_fact_size;
2290 				groupno = 0;
2291 			}
2292 
2293 			while (len > 0 && groups > 0) {
2294 				if (!action) {
2295 					/*
2296 					 * Special case: if action is NULL
2297 					 * we fill buf with otp_info records.
2298 					 */
2299 					struct otp_info *otpinfo;
2300 					map_word lockword;
2301 					len -= sizeof(struct otp_info);
2302 					if (len <= 0)
2303 						return -ENOSPC;
2304 					ret = do_otp_read(map, chip,
2305 							  reg_prot_offset,
2306 							  (u_char *)&lockword,
2307 							  map_bankwidth(map),
2308 							  0, 0,  0);
2309 					if (ret)
2310 						return ret;
2311 					otpinfo = (struct otp_info *)buf;
2312 					otpinfo->start = from;
2313 					otpinfo->length = groupsize;
2314 					otpinfo->locked =
2315 					   !map_word_bitsset(map, lockword,
2316 							     CMD(1 << groupno));
2317 					from += groupsize;
2318 					buf += sizeof(*otpinfo);
2319 					*retlen += sizeof(*otpinfo);
2320 				} else if (from >= groupsize) {
2321 					from -= groupsize;
2322 					data_offset += groupsize;
2323 				} else {
2324 					int size = groupsize;
2325 					data_offset += from;
2326 					size -= from;
2327 					from = 0;
2328 					if (size > len)
2329 						size = len;
2330 					ret = action(map, chip, data_offset,
2331 						     buf, size, reg_prot_offset,
2332 						     groupno, groupsize);
2333 					if (ret < 0)
2334 						return ret;
2335 					buf += size;
2336 					len -= size;
2337 					*retlen += size;
2338 					data_offset += size;
2339 				}
2340 				groupno++;
2341 				groups--;
2342 			}
2343 
2344 			/* next OTP region */
2345 			if (++field == extp->NumProtectionFields)
2346 				break;
2347 			reg_prot_offset = otp->ProtRegAddr;
2348 			reg_fact_groups = otp->FactGroups;
2349 			reg_fact_size = 1 << otp->FactProtRegSize;
2350 			reg_user_groups = otp->UserGroups;
2351 			reg_user_size = 1 << otp->UserProtRegSize;
2352 			otp++;
2353 		}
2354 	}
2355 
2356 	return 0;
2357 }
2358 
2359 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2360 					   size_t len, size_t *retlen,
2361 					    u_char *buf)
2362 {
2363 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2364 				     buf, do_otp_read, 0);
2365 }
2366 
2367 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2368 					   size_t len, size_t *retlen,
2369 					    u_char *buf)
2370 {
2371 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2372 				     buf, do_otp_read, 1);
2373 }
2374 
2375 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2376 					    size_t len, size_t *retlen,
2377 					     u_char *buf)
2378 {
2379 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2380 				     buf, do_otp_write, 1);
2381 }
2382 
2383 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2384 					   loff_t from, size_t len)
2385 {
2386 	size_t retlen;
2387 	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2388 				     NULL, do_otp_lock, 1);
2389 }
2390 
2391 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2392 					   struct otp_info *buf, size_t len)
2393 {
2394 	size_t retlen;
2395 	int ret;
2396 
2397 	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2398 	return ret ? : retlen;
2399 }
2400 
2401 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2402 					   struct otp_info *buf, size_t len)
2403 {
2404 	size_t retlen;
2405 	int ret;
2406 
2407 	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2408 	return ret ? : retlen;
2409 }
2410 
2411 #endif
2412 
2413 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2414 {
2415 	struct mtd_erase_region_info *region;
2416 	int block, status, i;
2417 	unsigned long adr;
2418 	size_t len;
2419 
2420 	for (i = 0; i < mtd->numeraseregions; i++) {
2421 		region = &mtd->eraseregions[i];
2422 		if (!region->lockmap)
2423 			continue;
2424 
2425 		for (block = 0; block < region->numblocks; block++){
2426 			len = region->erasesize;
2427 			adr = region->offset + block * len;
2428 
2429 			status = cfi_varsize_frob(mtd,
2430 					do_getlockstatus_oneblock, adr, len, NULL);
2431 			if (status)
2432 				set_bit(block, region->lockmap);
2433 			else
2434 				clear_bit(block, region->lockmap);
2435 		}
2436 	}
2437 }
2438 
2439 static int cfi_intelext_suspend(struct mtd_info *mtd)
2440 {
2441 	struct map_info *map = mtd->priv;
2442 	struct cfi_private *cfi = map->fldrv_priv;
2443 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2444 	int i;
2445 	struct flchip *chip;
2446 	int ret = 0;
2447 
2448 	if ((mtd->flags & MTD_POWERUP_LOCK)
2449 	    && extp && (extp->FeatureSupport & (1 << 5)))
2450 		cfi_intelext_save_locks(mtd);
2451 
2452 	for (i=0; !ret && i<cfi->numchips; i++) {
2453 		chip = &cfi->chips[i];
2454 
2455 		spin_lock(chip->mutex);
2456 
2457 		switch (chip->state) {
2458 		case FL_READY:
2459 		case FL_STATUS:
2460 		case FL_CFI_QUERY:
2461 		case FL_JEDEC_QUERY:
2462 			if (chip->oldstate == FL_READY) {
2463 				/* place the chip in a known state before suspend */
2464 				map_write(map, CMD(0xFF), cfi->chips[i].start);
2465 				chip->oldstate = chip->state;
2466 				chip->state = FL_PM_SUSPENDED;
2467 				/* No need to wake_up() on this state change -
2468 				 * as the whole point is that nobody can do anything
2469 				 * with the chip now anyway.
2470 				 */
2471 			} else {
2472 				/* There seems to be an operation pending. We must wait for it. */
2473 				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2474 				ret = -EAGAIN;
2475 			}
2476 			break;
2477 		default:
2478 			/* Should we actually wait? Once upon a time these routines weren't
2479 			   allowed to. Or should we return -EAGAIN, because the upper layers
2480 			   ought to have already shut down anything which was using the device
2481 			   anyway? The latter for now. */
2482 			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
2483 			ret = -EAGAIN;
2484 		case FL_PM_SUSPENDED:
2485 			break;
2486 		}
2487 		spin_unlock(chip->mutex);
2488 	}
2489 
2490 	/* Unlock the chips again */
2491 
2492 	if (ret) {
2493 		for (i--; i >=0; i--) {
2494 			chip = &cfi->chips[i];
2495 
2496 			spin_lock(chip->mutex);
2497 
2498 			if (chip->state == FL_PM_SUSPENDED) {
2499 				/* No need to force it into a known state here,
2500 				   because we're returning failure, and it didn't
2501 				   get power cycled */
2502 				chip->state = chip->oldstate;
2503 				chip->oldstate = FL_READY;
2504 				wake_up(&chip->wq);
2505 			}
2506 			spin_unlock(chip->mutex);
2507 		}
2508 	}
2509 
2510 	return ret;
2511 }
2512 
2513 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2514 {
2515 	struct mtd_erase_region_info *region;
2516 	int block, i;
2517 	unsigned long adr;
2518 	size_t len;
2519 
2520 	for (i = 0; i < mtd->numeraseregions; i++) {
2521 		region = &mtd->eraseregions[i];
2522 		if (!region->lockmap)
2523 			continue;
2524 
2525 		for (block = 0; block < region->numblocks; block++) {
2526 			len = region->erasesize;
2527 			adr = region->offset + block * len;
2528 
2529 			if (!test_bit(block, region->lockmap))
2530 				cfi_intelext_unlock(mtd, adr, len);
2531 		}
2532 	}
2533 }
2534 
2535 static void cfi_intelext_resume(struct mtd_info *mtd)
2536 {
2537 	struct map_info *map = mtd->priv;
2538 	struct cfi_private *cfi = map->fldrv_priv;
2539 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2540 	int i;
2541 	struct flchip *chip;
2542 
2543 	for (i=0; i<cfi->numchips; i++) {
2544 
2545 		chip = &cfi->chips[i];
2546 
2547 		spin_lock(chip->mutex);
2548 
2549 		/* Go to known state. Chip may have been power cycled */
2550 		if (chip->state == FL_PM_SUSPENDED) {
2551 			map_write(map, CMD(0xFF), cfi->chips[i].start);
2552 			chip->oldstate = chip->state = FL_READY;
2553 			wake_up(&chip->wq);
2554 		}
2555 
2556 		spin_unlock(chip->mutex);
2557 	}
2558 
2559 	if ((mtd->flags & MTD_POWERUP_LOCK)
2560 	    && extp && (extp->FeatureSupport & (1 << 5)))
2561 		cfi_intelext_restore_locks(mtd);
2562 }
2563 
2564 static int cfi_intelext_reset(struct mtd_info *mtd)
2565 {
2566 	struct map_info *map = mtd->priv;
2567 	struct cfi_private *cfi = map->fldrv_priv;
2568 	int i, ret;
2569 
2570 	for (i=0; i < cfi->numchips; i++) {
2571 		struct flchip *chip = &cfi->chips[i];
2572 
2573 		/* force the completion of any ongoing operation
2574 		   and switch to array mode so any bootloader in
2575 		   flash is accessible for soft reboot. */
2576 		spin_lock(chip->mutex);
2577 		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2578 		if (!ret) {
2579 			map_write(map, CMD(0xff), chip->start);
2580 			chip->state = FL_SHUTDOWN;
2581 			put_chip(map, chip, chip->start);
2582 		}
2583 		spin_unlock(chip->mutex);
2584 	}
2585 
2586 	return 0;
2587 }
2588 
2589 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2590 			       void *v)
2591 {
2592 	struct mtd_info *mtd;
2593 
2594 	mtd = container_of(nb, struct mtd_info, reboot_notifier);
2595 	cfi_intelext_reset(mtd);
2596 	return NOTIFY_DONE;
2597 }
2598 
2599 static void cfi_intelext_destroy(struct mtd_info *mtd)
2600 {
2601 	struct map_info *map = mtd->priv;
2602 	struct cfi_private *cfi = map->fldrv_priv;
2603 	struct mtd_erase_region_info *region;
2604 	int i;
2605 	cfi_intelext_reset(mtd);
2606 	unregister_reboot_notifier(&mtd->reboot_notifier);
2607 	kfree(cfi->cmdset_priv);
2608 	kfree(cfi->cfiq);
2609 	kfree(cfi->chips[0].priv);
2610 	kfree(cfi);
2611 	for (i = 0; i < mtd->numeraseregions; i++) {
2612 		region = &mtd->eraseregions[i];
2613 		if (region->lockmap)
2614 			kfree(region->lockmap);
2615 	}
2616 	kfree(mtd->eraseregions);
2617 }
2618 
2619 MODULE_LICENSE("GPL");
2620 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2621 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2622 MODULE_ALIAS("cfi_cmdset_0003");
2623 MODULE_ALIAS("cfi_cmdset_0200");
2624