1 /*
2  * Common Flash Interface support:
3  *   Intel Extended Vendor Command Set (ID 0x0001)
4  *
5  * (C) 2000 Red Hat. GPL'd
6  *
7  *
8  * 10/10/2000	Nicolas Pitre <nico@fluxnic.net>
9  * 	- completely revamped method functions so they are aware and
10  * 	  independent of the flash geometry (buswidth, interleave, etc.)
11  * 	- scalability vs code size is completely set at compile-time
12  * 	  (see include/linux/mtd/cfi.h for selection)
13  *	- optimized write buffer method
14  * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
15  *	- reworked lock/unlock/erase support for var size flash
16  * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
17  * 	- auto unlock sectors on resume for auto locking flash on power up
18  */
19 
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
24 #include <linux/init.h>
25 #include <asm/io.h>
26 #include <asm/byteorder.h>
27 
28 #include <linux/errno.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/interrupt.h>
32 #include <linux/reboot.h>
33 #include <linux/bitmap.h>
34 #include <linux/mtd/xip.h>
35 #include <linux/mtd/map.h>
36 #include <linux/mtd/mtd.h>
37 #include <linux/mtd/cfi.h>
38 
39 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
40 /* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
41 
42 // debugging, turns off buffer write mode if set to 1
43 #define FORCE_WORD_WRITE 0
44 
45 /* Intel chips */
46 #define I82802AB	0x00ad
47 #define I82802AC	0x00ac
48 #define PF38F4476	0x881c
49 /* STMicroelectronics chips */
50 #define M50LPW080       0x002F
51 #define M50FLW080A	0x0080
52 #define M50FLW080B	0x0081
53 /* Atmel chips */
54 #define AT49BV640D	0x02de
55 #define AT49BV640DT	0x02db
56 
57 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
58 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
59 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
60 static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
61 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
62 static void cfi_intelext_sync (struct mtd_info *);
63 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
64 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
65 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
66 				  uint64_t len);
67 #ifdef CONFIG_MTD_OTP
68 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
69 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
70 static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
71 static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
72 static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
73 					    struct otp_info *, size_t);
74 static int cfi_intelext_get_user_prot_info (struct mtd_info *,
75 					    struct otp_info *, size_t);
76 #endif
77 static int cfi_intelext_suspend (struct mtd_info *);
78 static void cfi_intelext_resume (struct mtd_info *);
79 static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
80 
81 static void cfi_intelext_destroy(struct mtd_info *);
82 
83 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
84 
85 static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
86 static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
87 
88 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
89 		     size_t *retlen, void **virt, resource_size_t *phys);
90 static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
91 
92 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
93 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
94 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
95 #include "fwh_lock.h"
96 
97 
98 
99 /*
100  *  *********** SETUP AND PROBE BITS  ***********
101  */
102 
103 static struct mtd_chip_driver cfi_intelext_chipdrv = {
104 	.probe		= NULL, /* Not usable directly */
105 	.destroy	= cfi_intelext_destroy,
106 	.name		= "cfi_cmdset_0001",
107 	.module		= THIS_MODULE
108 };
109 
110 /* #define DEBUG_LOCK_BITS */
111 /* #define DEBUG_CFI_FEATURES */
112 
113 #ifdef DEBUG_CFI_FEATURES
114 static void cfi_tell_features(struct cfi_pri_intelext *extp)
115 {
116 	int i;
117 	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
118 	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
119 	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
120 	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
121 	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
122 	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
123 	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
124 	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
125 	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
126 	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
127 	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
128 	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
129 	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
130 	for (i=11; i<32; i++) {
131 		if (extp->FeatureSupport & (1<<i))
132 			printk("     - Unknown Bit %X:      supported\n", i);
133 	}
134 
135 	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
136 	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
137 	for (i=1; i<8; i++) {
138 		if (extp->SuspendCmdSupport & (1<<i))
139 			printk("     - Unknown Bit %X:               supported\n", i);
140 	}
141 
142 	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
143 	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
144 	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
145 	for (i=2; i<3; i++) {
146 		if (extp->BlkStatusRegMask & (1<<i))
147 			printk("     - Unknown Bit %X Active: yes\n",i);
148 	}
149 	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
150 	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
151 	for (i=6; i<16; i++) {
152 		if (extp->BlkStatusRegMask & (1<<i))
153 			printk("     - Unknown Bit %X Active: yes\n",i);
154 	}
155 
156 	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
157 	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
158 	if (extp->VppOptimal)
159 		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
160 		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
161 }
162 #endif
163 
164 /* Atmel chips don't use the same PRI format as Intel chips */
165 static void fixup_convert_atmel_pri(struct mtd_info *mtd)
166 {
167 	struct map_info *map = mtd->priv;
168 	struct cfi_private *cfi = map->fldrv_priv;
169 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
170 	struct cfi_pri_atmel atmel_pri;
171 	uint32_t features = 0;
172 
173 	/* Reverse byteswapping */
174 	extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
175 	extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
176 	extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
177 
178 	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
179 	memset((char *)extp + 5, 0, sizeof(*extp) - 5);
180 
181 	printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);
182 
183 	if (atmel_pri.Features & 0x01) /* chip erase supported */
184 		features |= (1<<0);
185 	if (atmel_pri.Features & 0x02) /* erase suspend supported */
186 		features |= (1<<1);
187 	if (atmel_pri.Features & 0x04) /* program suspend supported */
188 		features |= (1<<2);
189 	if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
190 		features |= (1<<9);
191 	if (atmel_pri.Features & 0x20) /* page mode read supported */
192 		features |= (1<<7);
193 	if (atmel_pri.Features & 0x40) /* queued erase supported */
194 		features |= (1<<4);
195 	if (atmel_pri.Features & 0x80) /* Protection bits supported */
196 		features |= (1<<6);
197 
198 	extp->FeatureSupport = features;
199 
200 	/* burst write mode not supported */
201 	cfi->cfiq->BufWriteTimeoutTyp = 0;
202 	cfi->cfiq->BufWriteTimeoutMax = 0;
203 }
204 
205 static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
206 {
207 	struct map_info *map = mtd->priv;
208 	struct cfi_private *cfi = map->fldrv_priv;
209 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
210 
211 	cfip->FeatureSupport |= (1 << 5);
212 	mtd->flags |= MTD_POWERUP_LOCK;
213 }
214 
215 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
216 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
217 static void fixup_intel_strataflash(struct mtd_info *mtd)
218 {
219 	struct map_info *map = mtd->priv;
220 	struct cfi_private *cfi = map->fldrv_priv;
221 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
222 
223 	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
224 	                    "erase on write disabled.\n");
225 	extp->SuspendCmdSupport &= ~1;
226 }
227 #endif
228 
229 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
230 static void fixup_no_write_suspend(struct mtd_info *mtd)
231 {
232 	struct map_info *map = mtd->priv;
233 	struct cfi_private *cfi = map->fldrv_priv;
234 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
235 
236 	if (cfip && (cfip->FeatureSupport&4)) {
237 		cfip->FeatureSupport &= ~4;
238 		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
239 	}
240 }
241 #endif
242 
243 static void fixup_st_m28w320ct(struct mtd_info *mtd)
244 {
245 	struct map_info *map = mtd->priv;
246 	struct cfi_private *cfi = map->fldrv_priv;
247 
248 	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
249 	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
250 }
251 
252 static void fixup_st_m28w320cb(struct mtd_info *mtd)
253 {
254 	struct map_info *map = mtd->priv;
255 	struct cfi_private *cfi = map->fldrv_priv;
256 
257 	/* Note this is done after the region info is endian swapped */
258 	cfi->cfiq->EraseRegionInfo[1] =
259 		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
260 };
261 
262 static void fixup_use_point(struct mtd_info *mtd)
263 {
264 	struct map_info *map = mtd->priv;
265 	if (!mtd->point && map_is_linear(map)) {
266 		mtd->point   = cfi_intelext_point;
267 		mtd->unpoint = cfi_intelext_unpoint;
268 	}
269 }
270 
271 static void fixup_use_write_buffers(struct mtd_info *mtd)
272 {
273 	struct map_info *map = mtd->priv;
274 	struct cfi_private *cfi = map->fldrv_priv;
275 	if (cfi->cfiq->BufWriteTimeoutTyp) {
276 		printk(KERN_INFO "Using buffer write method\n" );
277 		mtd->write = cfi_intelext_write_buffers;
278 		mtd->writev = cfi_intelext_writev;
279 	}
280 }
281 
282 /*
283  * Some chips power-up with all sectors locked by default.
284  */
285 static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
286 {
287 	struct map_info *map = mtd->priv;
288 	struct cfi_private *cfi = map->fldrv_priv;
289 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
290 
291 	if (cfip->FeatureSupport&32) {
292 		printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
293 		mtd->flags |= MTD_POWERUP_LOCK;
294 	}
295 }
296 
297 static struct cfi_fixup cfi_fixup_table[] = {
298 	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
299 	{ CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
300 	{ CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
301 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
302 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
303 #endif
304 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
305 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
306 #endif
307 #if !FORCE_WORD_WRITE
308 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
309 #endif
310 	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
311 	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
312 	{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
313 	{ 0, 0, NULL }
314 };
315 
316 static struct cfi_fixup jedec_fixup_table[] = {
317 	{ CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
318 	{ CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
319 	{ CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
320 	{ CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
321 	{ CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
322 	{ 0, 0, NULL }
323 };
324 static struct cfi_fixup fixup_table[] = {
325 	/* The CFI vendor ids and the JEDEC vendor IDs appear
326 	 * to be common.  It is like the devices id's are as
327 	 * well.  This table is to pick all cases where
328 	 * we know that is the case.
329 	 */
330 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
331 	{ 0, 0, NULL }
332 };
333 
334 static void cfi_fixup_major_minor(struct cfi_private *cfi,
335 						struct cfi_pri_intelext *extp)
336 {
337 	if (cfi->mfr == CFI_MFR_INTEL &&
338 			cfi->id == PF38F4476 && extp->MinorVersion == '3')
339 		extp->MinorVersion = '1';
340 }
341 
342 static inline struct cfi_pri_intelext *
343 read_pri_intelext(struct map_info *map, __u16 adr)
344 {
345 	struct cfi_private *cfi = map->fldrv_priv;
346 	struct cfi_pri_intelext *extp;
347 	unsigned int extra_size = 0;
348 	unsigned int extp_size = sizeof(*extp);
349 
350  again:
351 	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
352 	if (!extp)
353 		return NULL;
354 
355 	cfi_fixup_major_minor(cfi, extp);
356 
357 	if (extp->MajorVersion != '1' ||
358 	    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
359 		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
360 		       "version %c.%c.\n",  extp->MajorVersion,
361 		       extp->MinorVersion);
362 		kfree(extp);
363 		return NULL;
364 	}
365 
366 	/* Do some byteswapping if necessary */
367 	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
368 	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
369 	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
370 
371 	if (extp->MinorVersion >= '0') {
372 		extra_size = 0;
373 
374 		/* Protection Register info */
375 		extra_size += (extp->NumProtectionFields - 1) *
376 			      sizeof(struct cfi_intelext_otpinfo);
377 	}
378 
379 	if (extp->MinorVersion >= '1') {
380 		/* Burst Read info */
381 		extra_size += 2;
382 		if (extp_size < sizeof(*extp) + extra_size)
383 			goto need_more;
384 		extra_size += extp->extra[extra_size - 1];
385 	}
386 
387 	if (extp->MinorVersion >= '3') {
388 		int nb_parts, i;
389 
390 		/* Number of hardware-partitions */
391 		extra_size += 1;
392 		if (extp_size < sizeof(*extp) + extra_size)
393 			goto need_more;
394 		nb_parts = extp->extra[extra_size - 1];
395 
396 		/* skip the sizeof(partregion) field in CFI 1.4 */
397 		if (extp->MinorVersion >= '4')
398 			extra_size += 2;
399 
400 		for (i = 0; i < nb_parts; i++) {
401 			struct cfi_intelext_regioninfo *rinfo;
402 			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
403 			extra_size += sizeof(*rinfo);
404 			if (extp_size < sizeof(*extp) + extra_size)
405 				goto need_more;
406 			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
407 			extra_size += (rinfo->NumBlockTypes - 1)
408 				      * sizeof(struct cfi_intelext_blockinfo);
409 		}
410 
411 		if (extp->MinorVersion >= '4')
412 			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);
413 
414 		if (extp_size < sizeof(*extp) + extra_size) {
415 			need_more:
416 			extp_size = sizeof(*extp) + extra_size;
417 			kfree(extp);
418 			if (extp_size > 4096) {
419 				printk(KERN_ERR
420 					"%s: cfi_pri_intelext is too fat\n",
421 					__func__);
422 				return NULL;
423 			}
424 			goto again;
425 		}
426 	}
427 
428 	return extp;
429 }
430 
431 struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
432 {
433 	struct cfi_private *cfi = map->fldrv_priv;
434 	struct mtd_info *mtd;
435 	int i;
436 
437 	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
438 	if (!mtd) {
439 		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
440 		return NULL;
441 	}
442 	mtd->priv = map;
443 	mtd->type = MTD_NORFLASH;
444 
445 	/* Fill in the default mtd operations */
446 	mtd->erase   = cfi_intelext_erase_varsize;
447 	mtd->read    = cfi_intelext_read;
448 	mtd->write   = cfi_intelext_write_words;
449 	mtd->sync    = cfi_intelext_sync;
450 	mtd->lock    = cfi_intelext_lock;
451 	mtd->unlock  = cfi_intelext_unlock;
452 	mtd->is_locked = cfi_intelext_is_locked;
453 	mtd->suspend = cfi_intelext_suspend;
454 	mtd->resume  = cfi_intelext_resume;
455 	mtd->flags   = MTD_CAP_NORFLASH;
456 	mtd->name    = map->name;
457 	mtd->writesize = 1;
458 	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
459 
460 	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
461 
462 	if (cfi->cfi_mode == CFI_MODE_CFI) {
463 		/*
464 		 * It's a real CFI chip, not one for which the probe
465 		 * routine faked a CFI structure. So we read the feature
466 		 * table from it.
467 		 */
468 		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
469 		struct cfi_pri_intelext *extp;
470 
471 		extp = read_pri_intelext(map, adr);
472 		if (!extp) {
473 			kfree(mtd);
474 			return NULL;
475 		}
476 
477 		/* Install our own private info structure */
478 		cfi->cmdset_priv = extp;
479 
480 		cfi_fixup(mtd, cfi_fixup_table);
481 
482 #ifdef DEBUG_CFI_FEATURES
483 		/* Tell the user about it in lots of lovely detail */
484 		cfi_tell_features(extp);
485 #endif
486 
487 		if(extp->SuspendCmdSupport & 1) {
488 			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
489 		}
490 	}
491 	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
492 		/* Apply jedec specific fixups */
493 		cfi_fixup(mtd, jedec_fixup_table);
494 	}
495 	/* Apply generic fixups */
496 	cfi_fixup(mtd, fixup_table);
497 
498 	for (i=0; i< cfi->numchips; i++) {
499 		if (cfi->cfiq->WordWriteTimeoutTyp)
500 			cfi->chips[i].word_write_time =
501 				1<<cfi->cfiq->WordWriteTimeoutTyp;
502 		else
503 			cfi->chips[i].word_write_time = 50000;
504 
505 		if (cfi->cfiq->BufWriteTimeoutTyp)
506 			cfi->chips[i].buffer_write_time =
507 				1<<cfi->cfiq->BufWriteTimeoutTyp;
508 		/* No default; if it isn't specified, we won't use it */
509 
510 		if (cfi->cfiq->BlockEraseTimeoutTyp)
511 			cfi->chips[i].erase_time =
512 				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
513 		else
514 			cfi->chips[i].erase_time = 2000000;
515 
516 		if (cfi->cfiq->WordWriteTimeoutTyp &&
517 		    cfi->cfiq->WordWriteTimeoutMax)
518 			cfi->chips[i].word_write_time_max =
519 				1<<(cfi->cfiq->WordWriteTimeoutTyp +
520 				    cfi->cfiq->WordWriteTimeoutMax);
521 		else
522 			cfi->chips[i].word_write_time_max = 50000 * 8;
523 
524 		if (cfi->cfiq->BufWriteTimeoutTyp &&
525 		    cfi->cfiq->BufWriteTimeoutMax)
526 			cfi->chips[i].buffer_write_time_max =
527 				1<<(cfi->cfiq->BufWriteTimeoutTyp +
528 				    cfi->cfiq->BufWriteTimeoutMax);
529 
530 		if (cfi->cfiq->BlockEraseTimeoutTyp &&
531 		    cfi->cfiq->BlockEraseTimeoutMax)
532 			cfi->chips[i].erase_time_max =
533 				1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
534 				       cfi->cfiq->BlockEraseTimeoutMax);
535 		else
536 			cfi->chips[i].erase_time_max = 2000000 * 8;
537 
538 		cfi->chips[i].ref_point_counter = 0;
539 		init_waitqueue_head(&(cfi->chips[i].wq));
540 	}
541 
542 	map->fldrv = &cfi_intelext_chipdrv;
543 
544 	return cfi_intelext_setup(mtd);
545 }
546 struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
547 struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
548 EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
549 EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
550 EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
551 
552 static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
553 {
554 	struct map_info *map = mtd->priv;
555 	struct cfi_private *cfi = map->fldrv_priv;
556 	unsigned long offset = 0;
557 	int i,j;
558 	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
559 
560 	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
561 
562 	mtd->size = devsize * cfi->numchips;
563 
564 	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
565 	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
566 			* mtd->numeraseregions, GFP_KERNEL);
567 	if (!mtd->eraseregions) {
568 		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
569 		goto setup_err;
570 	}
571 
572 	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
573 		unsigned long ernum, ersize;
574 		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
575 		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
576 
577 		if (mtd->erasesize < ersize) {
578 			mtd->erasesize = ersize;
579 		}
580 		for (j=0; j<cfi->numchips; j++) {
581 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
582 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
583 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
584 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
585 		}
586 		offset += (ersize * ernum);
587 	}
588 
589 	if (offset != devsize) {
590 		/* Argh */
591 		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
592 		goto setup_err;
593 	}
594 
595 	for (i=0; i<mtd->numeraseregions;i++){
596 		printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
597 		       i,(unsigned long long)mtd->eraseregions[i].offset,
598 		       mtd->eraseregions[i].erasesize,
599 		       mtd->eraseregions[i].numblocks);
600 	}
601 
602 #ifdef CONFIG_MTD_OTP
603 	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
604 	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
605 	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
606 	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
607 	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
608 	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
609 #endif
610 
611 	/* This function has the potential to distort the reality
612 	   a bit and therefore should be called last. */
613 	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
614 		goto setup_err;
615 
616 	__module_get(THIS_MODULE);
617 	register_reboot_notifier(&mtd->reboot_notifier);
618 	return mtd;
619 
620  setup_err:
621 	kfree(mtd->eraseregions);
622 	kfree(mtd);
623 	kfree(cfi->cmdset_priv);
624 	return NULL;
625 }
626 
627 static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
628 					struct cfi_private **pcfi)
629 {
630 	struct map_info *map = mtd->priv;
631 	struct cfi_private *cfi = *pcfi;
632 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
633 
634 	/*
635 	 * Probing of multi-partition flash chips.
636 	 *
637 	 * To support multiple partitions when available, we simply arrange
638 	 * for each of them to have their own flchip structure even if they
639 	 * are on the same physical chip.  This means completely recreating
640 	 * a new cfi_private structure right here which is a blatent code
641 	 * layering violation, but this is still the least intrusive
642 	 * arrangement at this point. This can be rearranged in the future
643 	 * if someone feels motivated enough.  --nico
644 	 */
645 	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
646 	    && extp->FeatureSupport & (1 << 9)) {
647 		struct cfi_private *newcfi;
648 		struct flchip *chip;
649 		struct flchip_shared *shared;
650 		int offs, numregions, numparts, partshift, numvirtchips, i, j;
651 
652 		/* Protection Register info */
653 		offs = (extp->NumProtectionFields - 1) *
654 		       sizeof(struct cfi_intelext_otpinfo);
655 
656 		/* Burst Read info */
657 		offs += extp->extra[offs+1]+2;
658 
659 		/* Number of partition regions */
660 		numregions = extp->extra[offs];
661 		offs += 1;
662 
663 		/* skip the sizeof(partregion) field in CFI 1.4 */
664 		if (extp->MinorVersion >= '4')
665 			offs += 2;
666 
667 		/* Number of hardware partitions */
668 		numparts = 0;
669 		for (i = 0; i < numregions; i++) {
670 			struct cfi_intelext_regioninfo *rinfo;
671 			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
672 			numparts += rinfo->NumIdentPartitions;
673 			offs += sizeof(*rinfo)
674 				+ (rinfo->NumBlockTypes - 1) *
675 				  sizeof(struct cfi_intelext_blockinfo);
676 		}
677 
678 		if (!numparts)
679 			numparts = 1;
680 
681 		/* Programming Region info */
682 		if (extp->MinorVersion >= '4') {
683 			struct cfi_intelext_programming_regioninfo *prinfo;
684 			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
685 			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
686 			mtd->flags &= ~MTD_BIT_WRITEABLE;
687 			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
688 			       map->name, mtd->writesize,
689 			       cfi->interleave * prinfo->ControlValid,
690 			       cfi->interleave * prinfo->ControlInvalid);
691 		}
692 
693 		/*
694 		 * All functions below currently rely on all chips having
695 		 * the same geometry so we'll just assume that all hardware
696 		 * partitions are of the same size too.
697 		 */
698 		partshift = cfi->chipshift - __ffs(numparts);
699 
700 		if ((1 << partshift) < mtd->erasesize) {
701 			printk( KERN_ERR
702 				"%s: bad number of hw partitions (%d)\n",
703 				__func__, numparts);
704 			return -EINVAL;
705 		}
706 
707 		numvirtchips = cfi->numchips * numparts;
708 		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
709 		if (!newcfi)
710 			return -ENOMEM;
711 		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
712 		if (!shared) {
713 			kfree(newcfi);
714 			return -ENOMEM;
715 		}
716 		memcpy(newcfi, cfi, sizeof(struct cfi_private));
717 		newcfi->numchips = numvirtchips;
718 		newcfi->chipshift = partshift;
719 
720 		chip = &newcfi->chips[0];
721 		for (i = 0; i < cfi->numchips; i++) {
722 			shared[i].writing = shared[i].erasing = NULL;
723 			mutex_init(&shared[i].lock);
724 			for (j = 0; j < numparts; j++) {
725 				*chip = cfi->chips[i];
726 				chip->start += j << partshift;
727 				chip->priv = &shared[i];
728 				/* those should be reset too since
729 				   they create memory references. */
730 				init_waitqueue_head(&chip->wq);
731 				mutex_init(&chip->mutex);
732 				chip++;
733 			}
734 		}
735 
736 		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
737 				  "--> %d partitions of %d KiB\n",
738 				  map->name, cfi->numchips, cfi->interleave,
739 				  newcfi->numchips, 1<<(newcfi->chipshift-10));
740 
741 		map->fldrv_priv = newcfi;
742 		*pcfi = newcfi;
743 		kfree(cfi);
744 	}
745 
746 	return 0;
747 }
748 
749 /*
750  *  *********** CHIP ACCESS FUNCTIONS ***********
751  */
752 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
753 {
754 	DECLARE_WAITQUEUE(wait, current);
755 	struct cfi_private *cfi = map->fldrv_priv;
756 	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
757 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
758 	unsigned long timeo = jiffies + HZ;
759 
760 	/* Prevent setting state FL_SYNCING for chip in suspended state. */
761 	if (mode == FL_SYNCING && chip->oldstate != FL_READY)
762 		goto sleep;
763 
764 	switch (chip->state) {
765 
766 	case FL_STATUS:
767 		for (;;) {
768 			status = map_read(map, adr);
769 			if (map_word_andequal(map, status, status_OK, status_OK))
770 				break;
771 
772 			/* At this point we're fine with write operations
773 			   in other partitions as they don't conflict. */
774 			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
775 				break;
776 
777 			mutex_unlock(&chip->mutex);
778 			cfi_udelay(1);
779 			mutex_lock(&chip->mutex);
780 			/* Someone else might have been playing with it. */
781 			return -EAGAIN;
782 		}
783 		/* Fall through */
784 	case FL_READY:
785 	case FL_CFI_QUERY:
786 	case FL_JEDEC_QUERY:
787 		return 0;
788 
789 	case FL_ERASING:
790 		if (!cfip ||
791 		    !(cfip->FeatureSupport & 2) ||
792 		    !(mode == FL_READY || mode == FL_POINT ||
793 		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
794 			goto sleep;
795 
796 
797 		/* Erase suspend */
798 		map_write(map, CMD(0xB0), adr);
799 
800 		/* If the flash has finished erasing, then 'erase suspend'
801 		 * appears to make some (28F320) flash devices switch to
802 		 * 'read' mode.  Make sure that we switch to 'read status'
803 		 * mode so we get the right data. --rmk
804 		 */
805 		map_write(map, CMD(0x70), adr);
806 		chip->oldstate = FL_ERASING;
807 		chip->state = FL_ERASE_SUSPENDING;
808 		chip->erase_suspended = 1;
809 		for (;;) {
810 			status = map_read(map, adr);
811 			if (map_word_andequal(map, status, status_OK, status_OK))
812 			        break;
813 
814 			if (time_after(jiffies, timeo)) {
815 				/* Urgh. Resume and pretend we weren't here.  */
816 				map_write(map, CMD(0xd0), adr);
817 				/* Make sure we're in 'read status' mode if it had finished */
818 				map_write(map, CMD(0x70), adr);
819 				chip->state = FL_ERASING;
820 				chip->oldstate = FL_READY;
821 				printk(KERN_ERR "%s: Chip not ready after erase "
822 				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
823 				return -EIO;
824 			}
825 
826 			mutex_unlock(&chip->mutex);
827 			cfi_udelay(1);
828 			mutex_lock(&chip->mutex);
829 			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
830 			   So we can just loop here. */
831 		}
832 		chip->state = FL_STATUS;
833 		return 0;
834 
835 	case FL_XIP_WHILE_ERASING:
836 		if (mode != FL_READY && mode != FL_POINT &&
837 		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
838 			goto sleep;
839 		chip->oldstate = chip->state;
840 		chip->state = FL_READY;
841 		return 0;
842 
843 	case FL_SHUTDOWN:
844 		/* The machine is rebooting now,so no one can get chip anymore */
845 		return -EIO;
846 	case FL_POINT:
847 		/* Only if there's no operation suspended... */
848 		if (mode == FL_READY && chip->oldstate == FL_READY)
849 			return 0;
850 		/* Fall through */
851 	default:
852 	sleep:
853 		set_current_state(TASK_UNINTERRUPTIBLE);
854 		add_wait_queue(&chip->wq, &wait);
855 		mutex_unlock(&chip->mutex);
856 		schedule();
857 		remove_wait_queue(&chip->wq, &wait);
858 		mutex_lock(&chip->mutex);
859 		return -EAGAIN;
860 	}
861 }
862 
863 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
864 {
865 	int ret;
866 	DECLARE_WAITQUEUE(wait, current);
867 
868  retry:
869 	if (chip->priv &&
870 	    (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
871 	    || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
872 		/*
873 		 * OK. We have possibility for contention on the write/erase
874 		 * operations which are global to the real chip and not per
875 		 * partition.  So let's fight it over in the partition which
876 		 * currently has authority on the operation.
877 		 *
878 		 * The rules are as follows:
879 		 *
880 		 * - any write operation must own shared->writing.
881 		 *
882 		 * - any erase operation must own _both_ shared->writing and
883 		 *   shared->erasing.
884 		 *
885 		 * - contention arbitration is handled in the owner's context.
886 		 *
887 		 * The 'shared' struct can be read and/or written only when
888 		 * its lock is taken.
889 		 */
890 		struct flchip_shared *shared = chip->priv;
891 		struct flchip *contender;
892 		mutex_lock(&shared->lock);
893 		contender = shared->writing;
894 		if (contender && contender != chip) {
895 			/*
896 			 * The engine to perform desired operation on this
897 			 * partition is already in use by someone else.
898 			 * Let's fight over it in the context of the chip
899 			 * currently using it.  If it is possible to suspend,
900 			 * that other partition will do just that, otherwise
901 			 * it'll happily send us to sleep.  In any case, when
902 			 * get_chip returns success we're clear to go ahead.
903 			 */
904 			ret = mutex_trylock(&contender->mutex);
905 			mutex_unlock(&shared->lock);
906 			if (!ret)
907 				goto retry;
908 			mutex_unlock(&chip->mutex);
909 			ret = chip_ready(map, contender, contender->start, mode);
910 			mutex_lock(&chip->mutex);
911 
912 			if (ret == -EAGAIN) {
913 				mutex_unlock(&contender->mutex);
914 				goto retry;
915 			}
916 			if (ret) {
917 				mutex_unlock(&contender->mutex);
918 				return ret;
919 			}
920 			mutex_lock(&shared->lock);
921 
922 			/* We should not own chip if it is already
923 			 * in FL_SYNCING state. Put contender and retry. */
924 			if (chip->state == FL_SYNCING) {
925 				put_chip(map, contender, contender->start);
926 				mutex_unlock(&contender->mutex);
927 				goto retry;
928 			}
929 			mutex_unlock(&contender->mutex);
930 		}
931 
932 		/* Check if we already have suspended erase
933 		 * on this chip. Sleep. */
934 		if (mode == FL_ERASING && shared->erasing
935 		    && shared->erasing->oldstate == FL_ERASING) {
936 			mutex_unlock(&shared->lock);
937 			set_current_state(TASK_UNINTERRUPTIBLE);
938 			add_wait_queue(&chip->wq, &wait);
939 			mutex_unlock(&chip->mutex);
940 			schedule();
941 			remove_wait_queue(&chip->wq, &wait);
942 			mutex_lock(&chip->mutex);
943 			goto retry;
944 		}
945 
946 		/* We now own it */
947 		shared->writing = chip;
948 		if (mode == FL_ERASING)
949 			shared->erasing = chip;
950 		mutex_unlock(&shared->lock);
951 	}
952 	ret = chip_ready(map, chip, adr, mode);
953 	if (ret == -EAGAIN)
954 		goto retry;
955 
956 	return ret;
957 }
958 
959 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
960 {
961 	struct cfi_private *cfi = map->fldrv_priv;
962 
963 	if (chip->priv) {
964 		struct flchip_shared *shared = chip->priv;
965 		mutex_lock(&shared->lock);
966 		if (shared->writing == chip && chip->oldstate == FL_READY) {
967 			/* We own the ability to write, but we're done */
968 			shared->writing = shared->erasing;
969 			if (shared->writing && shared->writing != chip) {
970 				/* give back ownership to who we loaned it from */
971 				struct flchip *loaner = shared->writing;
972 				mutex_lock(&loaner->mutex);
973 				mutex_unlock(&shared->lock);
974 				mutex_unlock(&chip->mutex);
975 				put_chip(map, loaner, loaner->start);
976 				mutex_lock(&chip->mutex);
977 				mutex_unlock(&loaner->mutex);
978 				wake_up(&chip->wq);
979 				return;
980 			}
981 			shared->erasing = NULL;
982 			shared->writing = NULL;
983 		} else if (shared->erasing == chip && shared->writing != chip) {
984 			/*
985 			 * We own the ability to erase without the ability
986 			 * to write, which means the erase was suspended
987 			 * and some other partition is currently writing.
988 			 * Don't let the switch below mess things up since
989 			 * we don't have ownership to resume anything.
990 			 */
991 			mutex_unlock(&shared->lock);
992 			wake_up(&chip->wq);
993 			return;
994 		}
995 		mutex_unlock(&shared->lock);
996 	}
997 
998 	switch(chip->oldstate) {
999 	case FL_ERASING:
1000 		chip->state = chip->oldstate;
1001 		/* What if one interleaved chip has finished and the
1002 		   other hasn't? The old code would leave the finished
1003 		   one in READY mode. That's bad, and caused -EROFS
1004 		   errors to be returned from do_erase_oneblock because
1005 		   that's the only bit it checked for at the time.
1006 		   As the state machine appears to explicitly allow
1007 		   sending the 0x70 (Read Status) command to an erasing
1008 		   chip and expecting it to be ignored, that's what we
1009 		   do. */
1010 		map_write(map, CMD(0xd0), adr);
1011 		map_write(map, CMD(0x70), adr);
1012 		chip->oldstate = FL_READY;
1013 		chip->state = FL_ERASING;
1014 		break;
1015 
1016 	case FL_XIP_WHILE_ERASING:
1017 		chip->state = chip->oldstate;
1018 		chip->oldstate = FL_READY;
1019 		break;
1020 
1021 	case FL_READY:
1022 	case FL_STATUS:
1023 	case FL_JEDEC_QUERY:
1024 		/* We should really make set_vpp() count, rather than doing this */
1025 		DISABLE_VPP(map);
1026 		break;
1027 	default:
1028 		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
1029 	}
1030 	wake_up(&chip->wq);
1031 }
1032 
1033 #ifdef CONFIG_MTD_XIP
1034 
1035 /*
1036  * No interrupt what so ever can be serviced while the flash isn't in array
1037  * mode.  This is ensured by the xip_disable() and xip_enable() functions
1038  * enclosing any code path where the flash is known not to be in array mode.
1039  * And within a XIP disabled code path, only functions marked with __xipram
1040  * may be called and nothing else (it's a good thing to inspect generated
1041  * assembly to make sure inline functions were actually inlined and that gcc
1042  * didn't emit calls to its own support functions). Also configuring MTD CFI
1043  * support to a single buswidth and a single interleave is also recommended.
1044  */
1045 
1046 static void xip_disable(struct map_info *map, struct flchip *chip,
1047 			unsigned long adr)
1048 {
1049 	/* TODO: chips with no XIP use should ignore and return */
1050 	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
1051 	local_irq_disable();
1052 }
1053 
1054 static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
1055 				unsigned long adr)
1056 {
1057 	struct cfi_private *cfi = map->fldrv_priv;
1058 	if (chip->state != FL_POINT && chip->state != FL_READY) {
1059 		map_write(map, CMD(0xff), adr);
1060 		chip->state = FL_READY;
1061 	}
1062 	(void) map_read(map, adr);
1063 	xip_iprefetch();
1064 	local_irq_enable();
1065 }
1066 
1067 /*
1068  * When a delay is required for the flash operation to complete, the
1069  * xip_wait_for_operation() function is polling for both the given timeout
1070  * and pending (but still masked) hardware interrupts.  Whenever there is an
1071  * interrupt pending then the flash erase or write operation is suspended,
1072  * array mode restored and interrupts unmasked.  Task scheduling might also
1073  * happen at that point.  The CPU eventually returns from the interrupt or
1074  * the call to schedule() and the suspended flash operation is resumed for
1075  * the remaining of the delay period.
1076  *
1077  * Warning: this function _will_ fool interrupt latency tracing tools.
1078  */
1079 
1080 static int __xipram xip_wait_for_operation(
1081 		struct map_info *map, struct flchip *chip,
1082 		unsigned long adr, unsigned int chip_op_time_max)
1083 {
1084 	struct cfi_private *cfi = map->fldrv_priv;
1085 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
1086 	map_word status, OK = CMD(0x80);
1087 	unsigned long usec, suspended, start, done;
1088 	flstate_t oldstate, newstate;
1089 
1090        	start = xip_currtime();
1091 	usec = chip_op_time_max;
1092 	if (usec == 0)
1093 		usec = 500000;
1094 	done = 0;
1095 
1096 	do {
1097 		cpu_relax();
1098 		if (xip_irqpending() && cfip &&
1099 		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
1100 		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
1101 		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
1102 			/*
1103 			 * Let's suspend the erase or write operation when
1104 			 * supported.  Note that we currently don't try to
1105 			 * suspend interleaved chips if there is already
1106 			 * another operation suspended (imagine what happens
1107 			 * when one chip was already done with the current
1108 			 * operation while another chip suspended it, then
1109 			 * we resume the whole thing at once).  Yes, it
1110 			 * can happen!
1111 			 */
1112 			usec -= done;
1113 			map_write(map, CMD(0xb0), adr);
1114 			map_write(map, CMD(0x70), adr);
1115 			suspended = xip_currtime();
1116 			do {
1117 				if (xip_elapsed_since(suspended) > 100000) {
1118 					/*
1119 					 * The chip doesn't want to suspend
1120 					 * after waiting for 100 msecs.
1121 					 * This is a critical error but there
1122 					 * is not much we can do here.
1123 					 */
1124 					return -EIO;
1125 				}
1126 				status = map_read(map, adr);
1127 			} while (!map_word_andequal(map, status, OK, OK));
1128 
1129 			/* Suspend succeeded */
1130 			oldstate = chip->state;
1131 			if (oldstate == FL_ERASING) {
1132 				if (!map_word_bitsset(map, status, CMD(0x40)))
1133 					break;
1134 				newstate = FL_XIP_WHILE_ERASING;
1135 				chip->erase_suspended = 1;
1136 			} else {
1137 				if (!map_word_bitsset(map, status, CMD(0x04)))
1138 					break;
1139 				newstate = FL_XIP_WHILE_WRITING;
1140 				chip->write_suspended = 1;
1141 			}
1142 			chip->state = newstate;
1143 			map_write(map, CMD(0xff), adr);
1144 			(void) map_read(map, adr);
1145 			xip_iprefetch();
1146 			local_irq_enable();
1147 			mutex_unlock(&chip->mutex);
1148 			xip_iprefetch();
1149 			cond_resched();
1150 
1151 			/*
1152 			 * We're back.  However someone else might have
1153 			 * decided to go write to the chip if we are in
1154 			 * a suspended erase state.  If so let's wait
1155 			 * until it's done.
1156 			 */
1157 			mutex_lock(&chip->mutex);
1158 			while (chip->state != newstate) {
1159 				DECLARE_WAITQUEUE(wait, current);
1160 				set_current_state(TASK_UNINTERRUPTIBLE);
1161 				add_wait_queue(&chip->wq, &wait);
1162 				mutex_unlock(&chip->mutex);
1163 				schedule();
1164 				remove_wait_queue(&chip->wq, &wait);
1165 				mutex_lock(&chip->mutex);
1166 			}
1167 			/* Disallow XIP again */
1168 			local_irq_disable();
1169 
1170 			/* Resume the write or erase operation */
1171 			map_write(map, CMD(0xd0), adr);
1172 			map_write(map, CMD(0x70), adr);
1173 			chip->state = oldstate;
1174 			start = xip_currtime();
1175 		} else if (usec >= 1000000/HZ) {
1176 			/*
1177 			 * Try to save on CPU power when waiting delay
1178 			 * is at least a system timer tick period.
1179 			 * No need to be extremely accurate here.
1180 			 */
1181 			xip_cpu_idle();
1182 		}
1183 		status = map_read(map, adr);
1184 		done = xip_elapsed_since(start);
1185 	} while (!map_word_andequal(map, status, OK, OK)
1186 		 && done < usec);
1187 
1188 	return (done >= usec) ? -ETIME : 0;
1189 }
1190 
1191 /*
1192  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1193  * the flash is actively programming or erasing since we have to poll for
1194  * the operation to complete anyway.  We can't do that in a generic way with
1195  * a XIP setup so do it before the actual flash operation in this case
1196  * and stub it out from INVAL_CACHE_AND_WAIT.
1197  */
1198 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1199 	INVALIDATE_CACHED_RANGE(map, from, size)
1200 
1201 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1202 	xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1203 
1204 #else
1205 
1206 #define xip_disable(map, chip, adr)
1207 #define xip_enable(map, chip, adr)
1208 #define XIP_INVAL_CACHED_RANGE(x...)
1209 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1210 
1211 static int inval_cache_and_wait_for_operation(
1212 		struct map_info *map, struct flchip *chip,
1213 		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1214 		unsigned int chip_op_time, unsigned int chip_op_time_max)
1215 {
1216 	struct cfi_private *cfi = map->fldrv_priv;
1217 	map_word status, status_OK = CMD(0x80);
1218 	int chip_state = chip->state;
1219 	unsigned int timeo, sleep_time, reset_timeo;
1220 
1221 	mutex_unlock(&chip->mutex);
1222 	if (inval_len)
1223 		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1224 	mutex_lock(&chip->mutex);
1225 
1226 	timeo = chip_op_time_max;
1227 	if (!timeo)
1228 		timeo = 500000;
1229 	reset_timeo = timeo;
1230 	sleep_time = chip_op_time / 2;
1231 
1232 	for (;;) {
1233 		if (chip->state != chip_state) {
1234 			/* Someone's suspended the operation: sleep */
1235 			DECLARE_WAITQUEUE(wait, current);
1236 			set_current_state(TASK_UNINTERRUPTIBLE);
1237 			add_wait_queue(&chip->wq, &wait);
1238 			mutex_unlock(&chip->mutex);
1239 			schedule();
1240 			remove_wait_queue(&chip->wq, &wait);
1241 			mutex_lock(&chip->mutex);
1242 			continue;
1243 		}
1244 
1245 		status = map_read(map, cmd_adr);
1246 		if (map_word_andequal(map, status, status_OK, status_OK))
1247 			break;
1248 
1249 		if (chip->erase_suspended && chip_state == FL_ERASING)  {
1250 			/* Erase suspend occurred while sleep: reset timeout */
1251 			timeo = reset_timeo;
1252 			chip->erase_suspended = 0;
1253 		}
1254 		if (chip->write_suspended && chip_state == FL_WRITING)  {
1255 			/* Write suspend occurred while sleep: reset timeout */
1256 			timeo = reset_timeo;
1257 			chip->write_suspended = 0;
1258 		}
1259 		if (!timeo) {
1260 			map_write(map, CMD(0x70), cmd_adr);
1261 			chip->state = FL_STATUS;
1262 			return -ETIME;
1263 		}
1264 
1265 		/* OK Still waiting. Drop the lock, wait a while and retry. */
1266 		mutex_unlock(&chip->mutex);
1267 		if (sleep_time >= 1000000/HZ) {
1268 			/*
1269 			 * Half of the normal delay still remaining
1270 			 * can be performed with a sleeping delay instead
1271 			 * of busy waiting.
1272 			 */
1273 			msleep(sleep_time/1000);
1274 			timeo -= sleep_time;
1275 			sleep_time = 1000000/HZ;
1276 		} else {
1277 			udelay(1);
1278 			cond_resched();
1279 			timeo--;
1280 		}
1281 		mutex_lock(&chip->mutex);
1282 	}
1283 
1284 	/* Done and happy. */
1285  	chip->state = FL_STATUS;
1286 	return 0;
1287 }
1288 
1289 #endif
1290 
1291 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1292 	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
1293 
1294 
1295 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1296 {
1297 	unsigned long cmd_addr;
1298 	struct cfi_private *cfi = map->fldrv_priv;
1299 	int ret = 0;
1300 
1301 	adr += chip->start;
1302 
1303 	/* Ensure cmd read/writes are aligned. */
1304 	cmd_addr = adr & ~(map_bankwidth(map)-1);
1305 
1306 	mutex_lock(&chip->mutex);
1307 
1308 	ret = get_chip(map, chip, cmd_addr, FL_POINT);
1309 
1310 	if (!ret) {
1311 		if (chip->state != FL_POINT && chip->state != FL_READY)
1312 			map_write(map, CMD(0xff), cmd_addr);
1313 
1314 		chip->state = FL_POINT;
1315 		chip->ref_point_counter++;
1316 	}
1317 	mutex_unlock(&chip->mutex);
1318 
1319 	return ret;
1320 }
1321 
1322 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1323 		size_t *retlen, void **virt, resource_size_t *phys)
1324 {
1325 	struct map_info *map = mtd->priv;
1326 	struct cfi_private *cfi = map->fldrv_priv;
1327 	unsigned long ofs, last_end = 0;
1328 	int chipnum;
1329 	int ret = 0;
1330 
1331 	if (!map->virt || (from + len > mtd->size))
1332 		return -EINVAL;
1333 
1334 	/* Now lock the chip(s) to POINT state */
1335 
1336 	/* ofs: offset within the first chip that the first read should start */
1337 	chipnum = (from >> cfi->chipshift);
1338 	ofs = from - (chipnum << cfi->chipshift);
1339 
1340 	*virt = map->virt + cfi->chips[chipnum].start + ofs;
1341 	*retlen = 0;
1342 	if (phys)
1343 		*phys = map->phys + cfi->chips[chipnum].start + ofs;
1344 
1345 	while (len) {
1346 		unsigned long thislen;
1347 
1348 		if (chipnum >= cfi->numchips)
1349 			break;
1350 
1351 		/* We cannot point across chips that are virtually disjoint */
1352 		if (!last_end)
1353 			last_end = cfi->chips[chipnum].start;
1354 		else if (cfi->chips[chipnum].start != last_end)
1355 			break;
1356 
1357 		if ((len + ofs -1) >> cfi->chipshift)
1358 			thislen = (1<<cfi->chipshift) - ofs;
1359 		else
1360 			thislen = len;
1361 
1362 		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1363 		if (ret)
1364 			break;
1365 
1366 		*retlen += thislen;
1367 		len -= thislen;
1368 
1369 		ofs = 0;
1370 		last_end += 1 << cfi->chipshift;
1371 		chipnum++;
1372 	}
1373 	return 0;
1374 }
1375 
1376 static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1377 {
1378 	struct map_info *map = mtd->priv;
1379 	struct cfi_private *cfi = map->fldrv_priv;
1380 	unsigned long ofs;
1381 	int chipnum;
1382 
1383 	/* Now unlock the chip(s) POINT state */
1384 
1385 	/* ofs: offset within the first chip that the first read should start */
1386 	chipnum = (from >> cfi->chipshift);
1387 	ofs = from - (chipnum <<  cfi->chipshift);
1388 
1389 	while (len) {
1390 		unsigned long thislen;
1391 		struct flchip *chip;
1392 
1393 		chip = &cfi->chips[chipnum];
1394 		if (chipnum >= cfi->numchips)
1395 			break;
1396 
1397 		if ((len + ofs -1) >> cfi->chipshift)
1398 			thislen = (1<<cfi->chipshift) - ofs;
1399 		else
1400 			thislen = len;
1401 
1402 		mutex_lock(&chip->mutex);
1403 		if (chip->state == FL_POINT) {
1404 			chip->ref_point_counter--;
1405 			if(chip->ref_point_counter == 0)
1406 				chip->state = FL_READY;
1407 		} else
1408 			printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
1409 
1410 		put_chip(map, chip, chip->start);
1411 		mutex_unlock(&chip->mutex);
1412 
1413 		len -= thislen;
1414 		ofs = 0;
1415 		chipnum++;
1416 	}
1417 }
1418 
1419 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1420 {
1421 	unsigned long cmd_addr;
1422 	struct cfi_private *cfi = map->fldrv_priv;
1423 	int ret;
1424 
1425 	adr += chip->start;
1426 
1427 	/* Ensure cmd read/writes are aligned. */
1428 	cmd_addr = adr & ~(map_bankwidth(map)-1);
1429 
1430 	mutex_lock(&chip->mutex);
1431 	ret = get_chip(map, chip, cmd_addr, FL_READY);
1432 	if (ret) {
1433 		mutex_unlock(&chip->mutex);
1434 		return ret;
1435 	}
1436 
1437 	if (chip->state != FL_POINT && chip->state != FL_READY) {
1438 		map_write(map, CMD(0xff), cmd_addr);
1439 
1440 		chip->state = FL_READY;
1441 	}
1442 
1443 	map_copy_from(map, buf, adr, len);
1444 
1445 	put_chip(map, chip, cmd_addr);
1446 
1447 	mutex_unlock(&chip->mutex);
1448 	return 0;
1449 }
1450 
1451 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1452 {
1453 	struct map_info *map = mtd->priv;
1454 	struct cfi_private *cfi = map->fldrv_priv;
1455 	unsigned long ofs;
1456 	int chipnum;
1457 	int ret = 0;
1458 
1459 	/* ofs: offset within the first chip that the first read should start */
1460 	chipnum = (from >> cfi->chipshift);
1461 	ofs = from - (chipnum <<  cfi->chipshift);
1462 
1463 	*retlen = 0;
1464 
1465 	while (len) {
1466 		unsigned long thislen;
1467 
1468 		if (chipnum >= cfi->numchips)
1469 			break;
1470 
1471 		if ((len + ofs -1) >> cfi->chipshift)
1472 			thislen = (1<<cfi->chipshift) - ofs;
1473 		else
1474 			thislen = len;
1475 
1476 		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1477 		if (ret)
1478 			break;
1479 
1480 		*retlen += thislen;
1481 		len -= thislen;
1482 		buf += thislen;
1483 
1484 		ofs = 0;
1485 		chipnum++;
1486 	}
1487 	return ret;
1488 }
1489 
1490 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1491 				     unsigned long adr, map_word datum, int mode)
1492 {
1493 	struct cfi_private *cfi = map->fldrv_priv;
1494 	map_word status, write_cmd;
1495 	int ret=0;
1496 
1497 	adr += chip->start;
1498 
1499 	switch (mode) {
1500 	case FL_WRITING:
1501 		write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
1502 		break;
1503 	case FL_OTP_WRITE:
1504 		write_cmd = CMD(0xc0);
1505 		break;
1506 	default:
1507 		return -EINVAL;
1508 	}
1509 
1510 	mutex_lock(&chip->mutex);
1511 	ret = get_chip(map, chip, adr, mode);
1512 	if (ret) {
1513 		mutex_unlock(&chip->mutex);
1514 		return ret;
1515 	}
1516 
1517 	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1518 	ENABLE_VPP(map);
1519 	xip_disable(map, chip, adr);
1520 	map_write(map, write_cmd, adr);
1521 	map_write(map, datum, adr);
1522 	chip->state = mode;
1523 
1524 	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1525 				   adr, map_bankwidth(map),
1526 				   chip->word_write_time,
1527 				   chip->word_write_time_max);
1528 	if (ret) {
1529 		xip_enable(map, chip, adr);
1530 		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1531 		goto out;
1532 	}
1533 
1534 	/* check for errors */
1535 	status = map_read(map, adr);
1536 	if (map_word_bitsset(map, status, CMD(0x1a))) {
1537 		unsigned long chipstatus = MERGESTATUS(status);
1538 
1539 		/* reset status */
1540 		map_write(map, CMD(0x50), adr);
1541 		map_write(map, CMD(0x70), adr);
1542 		xip_enable(map, chip, adr);
1543 
1544 		if (chipstatus & 0x02) {
1545 			ret = -EROFS;
1546 		} else if (chipstatus & 0x08) {
1547 			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1548 			ret = -EIO;
1549 		} else {
1550 			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1551 			ret = -EINVAL;
1552 		}
1553 
1554 		goto out;
1555 	}
1556 
1557 	xip_enable(map, chip, adr);
1558  out:	put_chip(map, chip, adr);
1559 	mutex_unlock(&chip->mutex);
1560 	return ret;
1561 }
1562 
1563 
1564 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1565 {
1566 	struct map_info *map = mtd->priv;
1567 	struct cfi_private *cfi = map->fldrv_priv;
1568 	int ret = 0;
1569 	int chipnum;
1570 	unsigned long ofs;
1571 
1572 	*retlen = 0;
1573 	if (!len)
1574 		return 0;
1575 
1576 	chipnum = to >> cfi->chipshift;
1577 	ofs = to  - (chipnum << cfi->chipshift);
1578 
1579 	/* If it's not bus-aligned, do the first byte write */
1580 	if (ofs & (map_bankwidth(map)-1)) {
1581 		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1582 		int gap = ofs - bus_ofs;
1583 		int n;
1584 		map_word datum;
1585 
1586 		n = min_t(int, len, map_bankwidth(map)-gap);
1587 		datum = map_word_ff(map);
1588 		datum = map_word_load_partial(map, datum, buf, gap, n);
1589 
1590 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1591 					       bus_ofs, datum, FL_WRITING);
1592 		if (ret)
1593 			return ret;
1594 
1595 		len -= n;
1596 		ofs += n;
1597 		buf += n;
1598 		(*retlen) += n;
1599 
1600 		if (ofs >> cfi->chipshift) {
1601 			chipnum ++;
1602 			ofs = 0;
1603 			if (chipnum == cfi->numchips)
1604 				return 0;
1605 		}
1606 	}
1607 
1608 	while(len >= map_bankwidth(map)) {
1609 		map_word datum = map_word_load(map, buf);
1610 
1611 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1612 				       ofs, datum, FL_WRITING);
1613 		if (ret)
1614 			return ret;
1615 
1616 		ofs += map_bankwidth(map);
1617 		buf += map_bankwidth(map);
1618 		(*retlen) += map_bankwidth(map);
1619 		len -= map_bankwidth(map);
1620 
1621 		if (ofs >> cfi->chipshift) {
1622 			chipnum ++;
1623 			ofs = 0;
1624 			if (chipnum == cfi->numchips)
1625 				return 0;
1626 		}
1627 	}
1628 
1629 	if (len & (map_bankwidth(map)-1)) {
1630 		map_word datum;
1631 
1632 		datum = map_word_ff(map);
1633 		datum = map_word_load_partial(map, datum, buf, 0, len);
1634 
1635 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1636 				       ofs, datum, FL_WRITING);
1637 		if (ret)
1638 			return ret;
1639 
1640 		(*retlen) += len;
1641 	}
1642 
1643 	return 0;
1644 }
1645 
1646 
1647 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1648 				    unsigned long adr, const struct kvec **pvec,
1649 				    unsigned long *pvec_seek, int len)
1650 {
1651 	struct cfi_private *cfi = map->fldrv_priv;
1652 	map_word status, write_cmd, datum;
1653 	unsigned long cmd_adr;
1654 	int ret, wbufsize, word_gap, words;
1655 	const struct kvec *vec;
1656 	unsigned long vec_seek;
1657 	unsigned long initial_adr;
1658 	int initial_len = len;
1659 
1660 	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1661 	adr += chip->start;
1662 	initial_adr = adr;
1663 	cmd_adr = adr & ~(wbufsize-1);
1664 
1665 	/* Let's determine this according to the interleave only once */
1666 	write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1667 
1668 	mutex_lock(&chip->mutex);
1669 	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1670 	if (ret) {
1671 		mutex_unlock(&chip->mutex);
1672 		return ret;
1673 	}
1674 
1675 	XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1676 	ENABLE_VPP(map);
1677 	xip_disable(map, chip, cmd_adr);
1678 
1679 	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1680 	   [...], the device will not accept any more Write to Buffer commands".
1681 	   So we must check here and reset those bits if they're set. Otherwise
1682 	   we're just pissing in the wind */
1683 	if (chip->state != FL_STATUS) {
1684 		map_write(map, CMD(0x70), cmd_adr);
1685 		chip->state = FL_STATUS;
1686 	}
1687 	status = map_read(map, cmd_adr);
1688 	if (map_word_bitsset(map, status, CMD(0x30))) {
1689 		xip_enable(map, chip, cmd_adr);
1690 		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1691 		xip_disable(map, chip, cmd_adr);
1692 		map_write(map, CMD(0x50), cmd_adr);
1693 		map_write(map, CMD(0x70), cmd_adr);
1694 	}
1695 
1696 	chip->state = FL_WRITING_TO_BUFFER;
1697 	map_write(map, write_cmd, cmd_adr);
1698 	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1699 	if (ret) {
1700 		/* Argh. Not ready for write to buffer */
1701 		map_word Xstatus = map_read(map, cmd_adr);
1702 		map_write(map, CMD(0x70), cmd_adr);
1703 		chip->state = FL_STATUS;
1704 		status = map_read(map, cmd_adr);
1705 		map_write(map, CMD(0x50), cmd_adr);
1706 		map_write(map, CMD(0x70), cmd_adr);
1707 		xip_enable(map, chip, cmd_adr);
1708 		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1709 				map->name, Xstatus.x[0], status.x[0]);
1710 		goto out;
1711 	}
1712 
1713 	/* Figure out the number of words to write */
1714 	word_gap = (-adr & (map_bankwidth(map)-1));
1715 	words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1716 	if (!word_gap) {
1717 		words--;
1718 	} else {
1719 		word_gap = map_bankwidth(map) - word_gap;
1720 		adr -= word_gap;
1721 		datum = map_word_ff(map);
1722 	}
1723 
1724 	/* Write length of data to come */
1725 	map_write(map, CMD(words), cmd_adr );
1726 
1727 	/* Write data */
1728 	vec = *pvec;
1729 	vec_seek = *pvec_seek;
1730 	do {
1731 		int n = map_bankwidth(map) - word_gap;
1732 		if (n > vec->iov_len - vec_seek)
1733 			n = vec->iov_len - vec_seek;
1734 		if (n > len)
1735 			n = len;
1736 
1737 		if (!word_gap && len < map_bankwidth(map))
1738 			datum = map_word_ff(map);
1739 
1740 		datum = map_word_load_partial(map, datum,
1741 					      vec->iov_base + vec_seek,
1742 					      word_gap, n);
1743 
1744 		len -= n;
1745 		word_gap += n;
1746 		if (!len || word_gap == map_bankwidth(map)) {
1747 			map_write(map, datum, adr);
1748 			adr += map_bankwidth(map);
1749 			word_gap = 0;
1750 		}
1751 
1752 		vec_seek += n;
1753 		if (vec_seek == vec->iov_len) {
1754 			vec++;
1755 			vec_seek = 0;
1756 		}
1757 	} while (len);
1758 	*pvec = vec;
1759 	*pvec_seek = vec_seek;
1760 
1761 	/* GO GO GO */
1762 	map_write(map, CMD(0xd0), cmd_adr);
1763 	chip->state = FL_WRITING;
1764 
1765 	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1766 				   initial_adr, initial_len,
1767 				   chip->buffer_write_time,
1768 				   chip->buffer_write_time_max);
1769 	if (ret) {
1770 		map_write(map, CMD(0x70), cmd_adr);
1771 		chip->state = FL_STATUS;
1772 		xip_enable(map, chip, cmd_adr);
1773 		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1774 		goto out;
1775 	}
1776 
1777 	/* check for errors */
1778 	status = map_read(map, cmd_adr);
1779 	if (map_word_bitsset(map, status, CMD(0x1a))) {
1780 		unsigned long chipstatus = MERGESTATUS(status);
1781 
1782 		/* reset status */
1783 		map_write(map, CMD(0x50), cmd_adr);
1784 		map_write(map, CMD(0x70), cmd_adr);
1785 		xip_enable(map, chip, cmd_adr);
1786 
1787 		if (chipstatus & 0x02) {
1788 			ret = -EROFS;
1789 		} else if (chipstatus & 0x08) {
1790 			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1791 			ret = -EIO;
1792 		} else {
1793 			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1794 			ret = -EINVAL;
1795 		}
1796 
1797 		goto out;
1798 	}
1799 
1800 	xip_enable(map, chip, cmd_adr);
1801  out:	put_chip(map, chip, cmd_adr);
1802 	mutex_unlock(&chip->mutex);
1803 	return ret;
1804 }
1805 
1806 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1807 				unsigned long count, loff_t to, size_t *retlen)
1808 {
1809 	struct map_info *map = mtd->priv;
1810 	struct cfi_private *cfi = map->fldrv_priv;
1811 	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1812 	int ret = 0;
1813 	int chipnum;
1814 	unsigned long ofs, vec_seek, i;
1815 	size_t len = 0;
1816 
1817 	for (i = 0; i < count; i++)
1818 		len += vecs[i].iov_len;
1819 
1820 	*retlen = 0;
1821 	if (!len)
1822 		return 0;
1823 
1824 	chipnum = to >> cfi->chipshift;
1825 	ofs = to - (chipnum << cfi->chipshift);
1826 	vec_seek = 0;
1827 
1828 	do {
1829 		/* We must not cross write block boundaries */
1830 		int size = wbufsize - (ofs & (wbufsize-1));
1831 
1832 		if (size > len)
1833 			size = len;
1834 		ret = do_write_buffer(map, &cfi->chips[chipnum],
1835 				      ofs, &vecs, &vec_seek, size);
1836 		if (ret)
1837 			return ret;
1838 
1839 		ofs += size;
1840 		(*retlen) += size;
1841 		len -= size;
1842 
1843 		if (ofs >> cfi->chipshift) {
1844 			chipnum ++;
1845 			ofs = 0;
1846 			if (chipnum == cfi->numchips)
1847 				return 0;
1848 		}
1849 
1850 		/* Be nice and reschedule with the chip in a usable state for other
1851 		   processes. */
1852 		cond_resched();
1853 
1854 	} while (len);
1855 
1856 	return 0;
1857 }
1858 
1859 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1860 				       size_t len, size_t *retlen, const u_char *buf)
1861 {
1862 	struct kvec vec;
1863 
1864 	vec.iov_base = (void *) buf;
1865 	vec.iov_len = len;
1866 
1867 	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1868 }
1869 
1870 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1871 				      unsigned long adr, int len, void *thunk)
1872 {
1873 	struct cfi_private *cfi = map->fldrv_priv;
1874 	map_word status;
1875 	int retries = 3;
1876 	int ret;
1877 
1878 	adr += chip->start;
1879 
1880  retry:
1881 	mutex_lock(&chip->mutex);
1882 	ret = get_chip(map, chip, adr, FL_ERASING);
1883 	if (ret) {
1884 		mutex_unlock(&chip->mutex);
1885 		return ret;
1886 	}
1887 
1888 	XIP_INVAL_CACHED_RANGE(map, adr, len);
1889 	ENABLE_VPP(map);
1890 	xip_disable(map, chip, adr);
1891 
1892 	/* Clear the status register first */
1893 	map_write(map, CMD(0x50), adr);
1894 
1895 	/* Now erase */
1896 	map_write(map, CMD(0x20), adr);
1897 	map_write(map, CMD(0xD0), adr);
1898 	chip->state = FL_ERASING;
1899 	chip->erase_suspended = 0;
1900 
1901 	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1902 				   adr, len,
1903 				   chip->erase_time,
1904 				   chip->erase_time_max);
1905 	if (ret) {
1906 		map_write(map, CMD(0x70), adr);
1907 		chip->state = FL_STATUS;
1908 		xip_enable(map, chip, adr);
1909 		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1910 		goto out;
1911 	}
1912 
1913 	/* We've broken this before. It doesn't hurt to be safe */
1914 	map_write(map, CMD(0x70), adr);
1915 	chip->state = FL_STATUS;
1916 	status = map_read(map, adr);
1917 
1918 	/* check for errors */
1919 	if (map_word_bitsset(map, status, CMD(0x3a))) {
1920 		unsigned long chipstatus = MERGESTATUS(status);
1921 
1922 		/* Reset the error bits */
1923 		map_write(map, CMD(0x50), adr);
1924 		map_write(map, CMD(0x70), adr);
1925 		xip_enable(map, chip, adr);
1926 
1927 		if ((chipstatus & 0x30) == 0x30) {
1928 			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1929 			ret = -EINVAL;
1930 		} else if (chipstatus & 0x02) {
1931 			/* Protection bit set */
1932 			ret = -EROFS;
1933 		} else if (chipstatus & 0x8) {
1934 			/* Voltage */
1935 			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1936 			ret = -EIO;
1937 		} else if (chipstatus & 0x20 && retries--) {
1938 			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1939 			put_chip(map, chip, adr);
1940 			mutex_unlock(&chip->mutex);
1941 			goto retry;
1942 		} else {
1943 			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1944 			ret = -EIO;
1945 		}
1946 
1947 		goto out;
1948 	}
1949 
1950 	xip_enable(map, chip, adr);
1951  out:	put_chip(map, chip, adr);
1952 	mutex_unlock(&chip->mutex);
1953 	return ret;
1954 }
1955 
1956 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1957 {
1958 	unsigned long ofs, len;
1959 	int ret;
1960 
1961 	ofs = instr->addr;
1962 	len = instr->len;
1963 
1964 	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1965 	if (ret)
1966 		return ret;
1967 
1968 	instr->state = MTD_ERASE_DONE;
1969 	mtd_erase_callback(instr);
1970 
1971 	return 0;
1972 }
1973 
1974 static void cfi_intelext_sync (struct mtd_info *mtd)
1975 {
1976 	struct map_info *map = mtd->priv;
1977 	struct cfi_private *cfi = map->fldrv_priv;
1978 	int i;
1979 	struct flchip *chip;
1980 	int ret = 0;
1981 
1982 	for (i=0; !ret && i<cfi->numchips; i++) {
1983 		chip = &cfi->chips[i];
1984 
1985 		mutex_lock(&chip->mutex);
1986 		ret = get_chip(map, chip, chip->start, FL_SYNCING);
1987 
1988 		if (!ret) {
1989 			chip->oldstate = chip->state;
1990 			chip->state = FL_SYNCING;
1991 			/* No need to wake_up() on this state change -
1992 			 * as the whole point is that nobody can do anything
1993 			 * with the chip now anyway.
1994 			 */
1995 		}
1996 		mutex_unlock(&chip->mutex);
1997 	}
1998 
1999 	/* Unlock the chips again */
2000 
2001 	for (i--; i >=0; i--) {
2002 		chip = &cfi->chips[i];
2003 
2004 		mutex_lock(&chip->mutex);
2005 
2006 		if (chip->state == FL_SYNCING) {
2007 			chip->state = chip->oldstate;
2008 			chip->oldstate = FL_READY;
2009 			wake_up(&chip->wq);
2010 		}
2011 		mutex_unlock(&chip->mutex);
2012 	}
2013 }
2014 
2015 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2016 						struct flchip *chip,
2017 						unsigned long adr,
2018 						int len, void *thunk)
2019 {
2020 	struct cfi_private *cfi = map->fldrv_priv;
2021 	int status, ofs_factor = cfi->interleave * cfi->device_type;
2022 
2023 	adr += chip->start;
2024 	xip_disable(map, chip, adr+(2*ofs_factor));
2025 	map_write(map, CMD(0x90), adr+(2*ofs_factor));
2026 	chip->state = FL_JEDEC_QUERY;
2027 	status = cfi_read_query(map, adr+(2*ofs_factor));
2028 	xip_enable(map, chip, 0);
2029 	return status;
2030 }
2031 
2032 #ifdef DEBUG_LOCK_BITS
2033 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2034 						struct flchip *chip,
2035 						unsigned long adr,
2036 						int len, void *thunk)
2037 {
2038 	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2039 	       adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2040 	return 0;
2041 }
2042 #endif
2043 
2044 #define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
2045 #define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)
2046 
2047 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2048 				       unsigned long adr, int len, void *thunk)
2049 {
2050 	struct cfi_private *cfi = map->fldrv_priv;
2051 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2052 	int udelay;
2053 	int ret;
2054 
2055 	adr += chip->start;
2056 
2057 	mutex_lock(&chip->mutex);
2058 	ret = get_chip(map, chip, adr, FL_LOCKING);
2059 	if (ret) {
2060 		mutex_unlock(&chip->mutex);
2061 		return ret;
2062 	}
2063 
2064 	ENABLE_VPP(map);
2065 	xip_disable(map, chip, adr);
2066 
2067 	map_write(map, CMD(0x60), adr);
2068 	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2069 		map_write(map, CMD(0x01), adr);
2070 		chip->state = FL_LOCKING;
2071 	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2072 		map_write(map, CMD(0xD0), adr);
2073 		chip->state = FL_UNLOCKING;
2074 	} else
2075 		BUG();
2076 
2077 	/*
2078 	 * If Instant Individual Block Locking supported then no need
2079 	 * to delay.
2080 	 */
2081 	udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
2082 
2083 	ret = WAIT_TIMEOUT(map, chip, adr, udelay, udelay * 100);
2084 	if (ret) {
2085 		map_write(map, CMD(0x70), adr);
2086 		chip->state = FL_STATUS;
2087 		xip_enable(map, chip, adr);
2088 		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
2089 		goto out;
2090 	}
2091 
2092 	xip_enable(map, chip, adr);
2093 out:	put_chip(map, chip, adr);
2094 	mutex_unlock(&chip->mutex);
2095 	return ret;
2096 }
2097 
2098 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2099 {
2100 	int ret;
2101 
2102 #ifdef DEBUG_LOCK_BITS
2103 	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2104 	       __func__, ofs, len);
2105 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2106 		ofs, len, NULL);
2107 #endif
2108 
2109 	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2110 		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2111 
2112 #ifdef DEBUG_LOCK_BITS
2113 	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2114 	       __func__, ret);
2115 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2116 		ofs, len, NULL);
2117 #endif
2118 
2119 	return ret;
2120 }
2121 
2122 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2123 {
2124 	int ret;
2125 
2126 #ifdef DEBUG_LOCK_BITS
2127 	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2128 	       __func__, ofs, len);
2129 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2130 		ofs, len, NULL);
2131 #endif
2132 
2133 	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2134 					ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2135 
2136 #ifdef DEBUG_LOCK_BITS
2137 	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2138 	       __func__, ret);
2139 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2140 		ofs, len, NULL);
2141 #endif
2142 
2143 	return ret;
2144 }
2145 
2146 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2147 				  uint64_t len)
2148 {
2149 	return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2150 				ofs, len, NULL) ? 1 : 0;
2151 }
2152 
2153 #ifdef CONFIG_MTD_OTP
2154 
2155 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2156 			u_long data_offset, u_char *buf, u_int size,
2157 			u_long prot_offset, u_int groupno, u_int groupsize);
2158 
2159 static int __xipram
2160 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2161 	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2162 {
2163 	struct cfi_private *cfi = map->fldrv_priv;
2164 	int ret;
2165 
2166 	mutex_lock(&chip->mutex);
2167 	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2168 	if (ret) {
2169 		mutex_unlock(&chip->mutex);
2170 		return ret;
2171 	}
2172 
2173 	/* let's ensure we're not reading back cached data from array mode */
2174 	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2175 
2176 	xip_disable(map, chip, chip->start);
2177 	if (chip->state != FL_JEDEC_QUERY) {
2178 		map_write(map, CMD(0x90), chip->start);
2179 		chip->state = FL_JEDEC_QUERY;
2180 	}
2181 	map_copy_from(map, buf, chip->start + offset, size);
2182 	xip_enable(map, chip, chip->start);
2183 
2184 	/* then ensure we don't keep OTP data in the cache */
2185 	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2186 
2187 	put_chip(map, chip, chip->start);
2188 	mutex_unlock(&chip->mutex);
2189 	return 0;
2190 }
2191 
2192 static int
2193 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2194 	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2195 {
2196 	int ret;
2197 
2198 	while (size) {
2199 		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2200 		int gap = offset - bus_ofs;
2201 		int n = min_t(int, size, map_bankwidth(map)-gap);
2202 		map_word datum = map_word_ff(map);
2203 
2204 		datum = map_word_load_partial(map, datum, buf, gap, n);
2205 		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2206 		if (ret)
2207 			return ret;
2208 
2209 		offset += n;
2210 		buf += n;
2211 		size -= n;
2212 	}
2213 
2214 	return 0;
2215 }
2216 
2217 static int
2218 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2219 	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2220 {
2221 	struct cfi_private *cfi = map->fldrv_priv;
2222 	map_word datum;
2223 
2224 	/* make sure area matches group boundaries */
2225 	if (size != grpsz)
2226 		return -EXDEV;
2227 
2228 	datum = map_word_ff(map);
2229 	datum = map_word_clr(map, datum, CMD(1 << grpno));
2230 	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2231 }
2232 
2233 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2234 				 size_t *retlen, u_char *buf,
2235 				 otp_op_t action, int user_regs)
2236 {
2237 	struct map_info *map = mtd->priv;
2238 	struct cfi_private *cfi = map->fldrv_priv;
2239 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2240 	struct flchip *chip;
2241 	struct cfi_intelext_otpinfo *otp;
2242 	u_long devsize, reg_prot_offset, data_offset;
2243 	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2244 	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2245 	int ret;
2246 
2247 	*retlen = 0;
2248 
2249 	/* Check that we actually have some OTP registers */
2250 	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2251 		return -ENODATA;
2252 
2253 	/* we need real chips here not virtual ones */
2254 	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2255 	chip_step = devsize >> cfi->chipshift;
2256 	chip_num = 0;
2257 
2258 	/* Some chips have OTP located in the _top_ partition only.
2259 	   For example: Intel 28F256L18T (T means top-parameter device) */
2260 	if (cfi->mfr == CFI_MFR_INTEL) {
2261 		switch (cfi->id) {
2262 		case 0x880b:
2263 		case 0x880c:
2264 		case 0x880d:
2265 			chip_num = chip_step - 1;
2266 		}
2267 	}
2268 
2269 	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2270 		chip = &cfi->chips[chip_num];
2271 		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2272 
2273 		/* first OTP region */
2274 		field = 0;
2275 		reg_prot_offset = extp->ProtRegAddr;
2276 		reg_fact_groups = 1;
2277 		reg_fact_size = 1 << extp->FactProtRegSize;
2278 		reg_user_groups = 1;
2279 		reg_user_size = 1 << extp->UserProtRegSize;
2280 
2281 		while (len > 0) {
2282 			/* flash geometry fixup */
2283 			data_offset = reg_prot_offset + 1;
2284 			data_offset *= cfi->interleave * cfi->device_type;
2285 			reg_prot_offset *= cfi->interleave * cfi->device_type;
2286 			reg_fact_size *= cfi->interleave;
2287 			reg_user_size *= cfi->interleave;
2288 
2289 			if (user_regs) {
2290 				groups = reg_user_groups;
2291 				groupsize = reg_user_size;
2292 				/* skip over factory reg area */
2293 				groupno = reg_fact_groups;
2294 				data_offset += reg_fact_groups * reg_fact_size;
2295 			} else {
2296 				groups = reg_fact_groups;
2297 				groupsize = reg_fact_size;
2298 				groupno = 0;
2299 			}
2300 
2301 			while (len > 0 && groups > 0) {
2302 				if (!action) {
2303 					/*
2304 					 * Special case: if action is NULL
2305 					 * we fill buf with otp_info records.
2306 					 */
2307 					struct otp_info *otpinfo;
2308 					map_word lockword;
2309 					len -= sizeof(struct otp_info);
2310 					if (len <= 0)
2311 						return -ENOSPC;
2312 					ret = do_otp_read(map, chip,
2313 							  reg_prot_offset,
2314 							  (u_char *)&lockword,
2315 							  map_bankwidth(map),
2316 							  0, 0,  0);
2317 					if (ret)
2318 						return ret;
2319 					otpinfo = (struct otp_info *)buf;
2320 					otpinfo->start = from;
2321 					otpinfo->length = groupsize;
2322 					otpinfo->locked =
2323 					   !map_word_bitsset(map, lockword,
2324 							     CMD(1 << groupno));
2325 					from += groupsize;
2326 					buf += sizeof(*otpinfo);
2327 					*retlen += sizeof(*otpinfo);
2328 				} else if (from >= groupsize) {
2329 					from -= groupsize;
2330 					data_offset += groupsize;
2331 				} else {
2332 					int size = groupsize;
2333 					data_offset += from;
2334 					size -= from;
2335 					from = 0;
2336 					if (size > len)
2337 						size = len;
2338 					ret = action(map, chip, data_offset,
2339 						     buf, size, reg_prot_offset,
2340 						     groupno, groupsize);
2341 					if (ret < 0)
2342 						return ret;
2343 					buf += size;
2344 					len -= size;
2345 					*retlen += size;
2346 					data_offset += size;
2347 				}
2348 				groupno++;
2349 				groups--;
2350 			}
2351 
2352 			/* next OTP region */
2353 			if (++field == extp->NumProtectionFields)
2354 				break;
2355 			reg_prot_offset = otp->ProtRegAddr;
2356 			reg_fact_groups = otp->FactGroups;
2357 			reg_fact_size = 1 << otp->FactProtRegSize;
2358 			reg_user_groups = otp->UserGroups;
2359 			reg_user_size = 1 << otp->UserProtRegSize;
2360 			otp++;
2361 		}
2362 	}
2363 
2364 	return 0;
2365 }
2366 
2367 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2368 					   size_t len, size_t *retlen,
2369 					    u_char *buf)
2370 {
2371 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2372 				     buf, do_otp_read, 0);
2373 }
2374 
2375 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2376 					   size_t len, size_t *retlen,
2377 					    u_char *buf)
2378 {
2379 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2380 				     buf, do_otp_read, 1);
2381 }
2382 
2383 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2384 					    size_t len, size_t *retlen,
2385 					     u_char *buf)
2386 {
2387 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2388 				     buf, do_otp_write, 1);
2389 }
2390 
2391 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2392 					   loff_t from, size_t len)
2393 {
2394 	size_t retlen;
2395 	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2396 				     NULL, do_otp_lock, 1);
2397 }
2398 
2399 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2400 					   struct otp_info *buf, size_t len)
2401 {
2402 	size_t retlen;
2403 	int ret;
2404 
2405 	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2406 	return ret ? : retlen;
2407 }
2408 
2409 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2410 					   struct otp_info *buf, size_t len)
2411 {
2412 	size_t retlen;
2413 	int ret;
2414 
2415 	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2416 	return ret ? : retlen;
2417 }
2418 
2419 #endif
2420 
2421 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2422 {
2423 	struct mtd_erase_region_info *region;
2424 	int block, status, i;
2425 	unsigned long adr;
2426 	size_t len;
2427 
2428 	for (i = 0; i < mtd->numeraseregions; i++) {
2429 		region = &mtd->eraseregions[i];
2430 		if (!region->lockmap)
2431 			continue;
2432 
2433 		for (block = 0; block < region->numblocks; block++){
2434 			len = region->erasesize;
2435 			adr = region->offset + block * len;
2436 
2437 			status = cfi_varsize_frob(mtd,
2438 					do_getlockstatus_oneblock, adr, len, NULL);
2439 			if (status)
2440 				set_bit(block, region->lockmap);
2441 			else
2442 				clear_bit(block, region->lockmap);
2443 		}
2444 	}
2445 }
2446 
2447 static int cfi_intelext_suspend(struct mtd_info *mtd)
2448 {
2449 	struct map_info *map = mtd->priv;
2450 	struct cfi_private *cfi = map->fldrv_priv;
2451 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2452 	int i;
2453 	struct flchip *chip;
2454 	int ret = 0;
2455 
2456 	if ((mtd->flags & MTD_POWERUP_LOCK)
2457 	    && extp && (extp->FeatureSupport & (1 << 5)))
2458 		cfi_intelext_save_locks(mtd);
2459 
2460 	for (i=0; !ret && i<cfi->numchips; i++) {
2461 		chip = &cfi->chips[i];
2462 
2463 		mutex_lock(&chip->mutex);
2464 
2465 		switch (chip->state) {
2466 		case FL_READY:
2467 		case FL_STATUS:
2468 		case FL_CFI_QUERY:
2469 		case FL_JEDEC_QUERY:
2470 			if (chip->oldstate == FL_READY) {
2471 				/* place the chip in a known state before suspend */
2472 				map_write(map, CMD(0xFF), cfi->chips[i].start);
2473 				chip->oldstate = chip->state;
2474 				chip->state = FL_PM_SUSPENDED;
2475 				/* No need to wake_up() on this state change -
2476 				 * as the whole point is that nobody can do anything
2477 				 * with the chip now anyway.
2478 				 */
2479 			} else {
2480 				/* There seems to be an operation pending. We must wait for it. */
2481 				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2482 				ret = -EAGAIN;
2483 			}
2484 			break;
2485 		default:
2486 			/* Should we actually wait? Once upon a time these routines weren't
2487 			   allowed to. Or should we return -EAGAIN, because the upper layers
2488 			   ought to have already shut down anything which was using the device
2489 			   anyway? The latter for now. */
2490 			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
2491 			ret = -EAGAIN;
2492 		case FL_PM_SUSPENDED:
2493 			break;
2494 		}
2495 		mutex_unlock(&chip->mutex);
2496 	}
2497 
2498 	/* Unlock the chips again */
2499 
2500 	if (ret) {
2501 		for (i--; i >=0; i--) {
2502 			chip = &cfi->chips[i];
2503 
2504 			mutex_lock(&chip->mutex);
2505 
2506 			if (chip->state == FL_PM_SUSPENDED) {
2507 				/* No need to force it into a known state here,
2508 				   because we're returning failure, and it didn't
2509 				   get power cycled */
2510 				chip->state = chip->oldstate;
2511 				chip->oldstate = FL_READY;
2512 				wake_up(&chip->wq);
2513 			}
2514 			mutex_unlock(&chip->mutex);
2515 		}
2516 	}
2517 
2518 	return ret;
2519 }
2520 
2521 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2522 {
2523 	struct mtd_erase_region_info *region;
2524 	int block, i;
2525 	unsigned long adr;
2526 	size_t len;
2527 
2528 	for (i = 0; i < mtd->numeraseregions; i++) {
2529 		region = &mtd->eraseregions[i];
2530 		if (!region->lockmap)
2531 			continue;
2532 
2533 		for (block = 0; block < region->numblocks; block++) {
2534 			len = region->erasesize;
2535 			adr = region->offset + block * len;
2536 
2537 			if (!test_bit(block, region->lockmap))
2538 				cfi_intelext_unlock(mtd, adr, len);
2539 		}
2540 	}
2541 }
2542 
2543 static void cfi_intelext_resume(struct mtd_info *mtd)
2544 {
2545 	struct map_info *map = mtd->priv;
2546 	struct cfi_private *cfi = map->fldrv_priv;
2547 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2548 	int i;
2549 	struct flchip *chip;
2550 
2551 	for (i=0; i<cfi->numchips; i++) {
2552 
2553 		chip = &cfi->chips[i];
2554 
2555 		mutex_lock(&chip->mutex);
2556 
2557 		/* Go to known state. Chip may have been power cycled */
2558 		if (chip->state == FL_PM_SUSPENDED) {
2559 			map_write(map, CMD(0xFF), cfi->chips[i].start);
2560 			chip->oldstate = chip->state = FL_READY;
2561 			wake_up(&chip->wq);
2562 		}
2563 
2564 		mutex_unlock(&chip->mutex);
2565 	}
2566 
2567 	if ((mtd->flags & MTD_POWERUP_LOCK)
2568 	    && extp && (extp->FeatureSupport & (1 << 5)))
2569 		cfi_intelext_restore_locks(mtd);
2570 }
2571 
2572 static int cfi_intelext_reset(struct mtd_info *mtd)
2573 {
2574 	struct map_info *map = mtd->priv;
2575 	struct cfi_private *cfi = map->fldrv_priv;
2576 	int i, ret;
2577 
2578 	for (i=0; i < cfi->numchips; i++) {
2579 		struct flchip *chip = &cfi->chips[i];
2580 
2581 		/* force the completion of any ongoing operation
2582 		   and switch to array mode so any bootloader in
2583 		   flash is accessible for soft reboot. */
2584 		mutex_lock(&chip->mutex);
2585 		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2586 		if (!ret) {
2587 			map_write(map, CMD(0xff), chip->start);
2588 			chip->state = FL_SHUTDOWN;
2589 			put_chip(map, chip, chip->start);
2590 		}
2591 		mutex_unlock(&chip->mutex);
2592 	}
2593 
2594 	return 0;
2595 }
2596 
2597 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2598 			       void *v)
2599 {
2600 	struct mtd_info *mtd;
2601 
2602 	mtd = container_of(nb, struct mtd_info, reboot_notifier);
2603 	cfi_intelext_reset(mtd);
2604 	return NOTIFY_DONE;
2605 }
2606 
2607 static void cfi_intelext_destroy(struct mtd_info *mtd)
2608 {
2609 	struct map_info *map = mtd->priv;
2610 	struct cfi_private *cfi = map->fldrv_priv;
2611 	struct mtd_erase_region_info *region;
2612 	int i;
2613 	cfi_intelext_reset(mtd);
2614 	unregister_reboot_notifier(&mtd->reboot_notifier);
2615 	kfree(cfi->cmdset_priv);
2616 	kfree(cfi->cfiq);
2617 	kfree(cfi->chips[0].priv);
2618 	kfree(cfi);
2619 	for (i = 0; i < mtd->numeraseregions; i++) {
2620 		region = &mtd->eraseregions[i];
2621 		if (region->lockmap)
2622 			kfree(region->lockmap);
2623 	}
2624 	kfree(mtd->eraseregions);
2625 }
2626 
2627 MODULE_LICENSE("GPL");
2628 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2629 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2630 MODULE_ALIAS("cfi_cmdset_0003");
2631 MODULE_ALIAS("cfi_cmdset_0200");
2632