1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Common Flash Interface support:
4  *   Intel Extended Vendor Command Set (ID 0x0001)
5  *
6  * (C) 2000 Red Hat.
7  *
8  *
9  * 10/10/2000	Nicolas Pitre <nico@fluxnic.net>
10  * 	- completely revamped method functions so they are aware and
11  * 	  independent of the flash geometry (buswidth, interleave, etc.)
12  * 	- scalability vs code size is completely set at compile-time
13  * 	  (see include/linux/mtd/cfi.h for selection)
14  *	- optimized write buffer method
15  * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
16  *	- reworked lock/unlock/erase support for var size flash
17  * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
18  * 	- auto unlock sectors on resume for auto locking flash on power up
19  */
20 
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <asm/io.h>
26 #include <asm/byteorder.h>
27 
28 #include <linux/errno.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/interrupt.h>
32 #include <linux/reboot.h>
33 #include <linux/bitmap.h>
34 #include <linux/mtd/xip.h>
35 #include <linux/mtd/map.h>
36 #include <linux/mtd/mtd.h>
37 #include <linux/mtd/cfi.h>
38 
39 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
40 /* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
41 
42 // debugging, turns off buffer write mode if set to 1
43 #define FORCE_WORD_WRITE 0
44 
45 /* Intel chips */
46 #define I82802AB	0x00ad
47 #define I82802AC	0x00ac
48 #define PF38F4476	0x881c
49 #define M28F00AP30	0x8963
50 /* STMicroelectronics chips */
51 #define M50LPW080       0x002F
52 #define M50FLW080A	0x0080
53 #define M50FLW080B	0x0081
54 /* Atmel chips */
55 #define AT49BV640D	0x02de
56 #define AT49BV640DT	0x02db
57 /* Sharp chips */
58 #define LH28F640BFHE_PTTL90	0x00b0
59 #define LH28F640BFHE_PBTL90	0x00b1
60 #define LH28F640BFHE_PTTL70A	0x00b2
61 #define LH28F640BFHE_PBTL70A	0x00b3
62 
63 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
64 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
65 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
66 static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
67 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
68 static void cfi_intelext_sync (struct mtd_info *);
69 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
70 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
71 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
72 				  uint64_t len);
73 #ifdef CONFIG_MTD_OTP
74 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
75 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
76 static int cfi_intelext_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
77 					    size_t *, const u_char *);
78 static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
79 static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
80 					   size_t *, struct otp_info *);
81 static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
82 					   size_t *, struct otp_info *);
83 #endif
84 static int cfi_intelext_suspend (struct mtd_info *);
85 static void cfi_intelext_resume (struct mtd_info *);
86 static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
87 
88 static void cfi_intelext_destroy(struct mtd_info *);
89 
90 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
91 
92 static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
93 static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
94 
95 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
96 		     size_t *retlen, void **virt, resource_size_t *phys);
97 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
98 
99 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
100 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
101 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
102 #include "fwh_lock.h"
103 
104 
105 
106 /*
107  *  *********** SETUP AND PROBE BITS  ***********
108  */
109 
110 static struct mtd_chip_driver cfi_intelext_chipdrv = {
111 	.probe		= NULL, /* Not usable directly */
112 	.destroy	= cfi_intelext_destroy,
113 	.name		= "cfi_cmdset_0001",
114 	.module		= THIS_MODULE
115 };
116 
117 /* #define DEBUG_LOCK_BITS */
118 /* #define DEBUG_CFI_FEATURES */
119 
120 #ifdef DEBUG_CFI_FEATURES
121 static void cfi_tell_features(struct cfi_pri_intelext *extp)
122 {
123 	int i;
124 	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
125 	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
126 	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
127 	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
128 	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
129 	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
130 	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
131 	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
132 	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
133 	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
134 	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
135 	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
136 	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
137 	for (i=11; i<32; i++) {
138 		if (extp->FeatureSupport & (1<<i))
139 			printk("     - Unknown Bit %X:      supported\n", i);
140 	}
141 
142 	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
143 	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
144 	for (i=1; i<8; i++) {
145 		if (extp->SuspendCmdSupport & (1<<i))
146 			printk("     - Unknown Bit %X:               supported\n", i);
147 	}
148 
149 	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
150 	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
151 	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
152 	for (i=2; i<3; i++) {
153 		if (extp->BlkStatusRegMask & (1<<i))
154 			printk("     - Unknown Bit %X Active: yes\n",i);
155 	}
156 	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
157 	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
158 	for (i=6; i<16; i++) {
159 		if (extp->BlkStatusRegMask & (1<<i))
160 			printk("     - Unknown Bit %X Active: yes\n",i);
161 	}
162 
163 	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
164 	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
165 	if (extp->VppOptimal)
166 		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
167 		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
168 }
169 #endif
170 
171 /* Atmel chips don't use the same PRI format as Intel chips */
172 static void fixup_convert_atmel_pri(struct mtd_info *mtd)
173 {
174 	struct map_info *map = mtd->priv;
175 	struct cfi_private *cfi = map->fldrv_priv;
176 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
177 	struct cfi_pri_atmel atmel_pri;
178 	uint32_t features = 0;
179 
180 	/* Reverse byteswapping */
181 	extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
182 	extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
183 	extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
184 
185 	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
186 	memset((char *)extp + 5, 0, sizeof(*extp) - 5);
187 
188 	printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);
189 
190 	if (atmel_pri.Features & 0x01) /* chip erase supported */
191 		features |= (1<<0);
192 	if (atmel_pri.Features & 0x02) /* erase suspend supported */
193 		features |= (1<<1);
194 	if (atmel_pri.Features & 0x04) /* program suspend supported */
195 		features |= (1<<2);
196 	if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
197 		features |= (1<<9);
198 	if (atmel_pri.Features & 0x20) /* page mode read supported */
199 		features |= (1<<7);
200 	if (atmel_pri.Features & 0x40) /* queued erase supported */
201 		features |= (1<<4);
202 	if (atmel_pri.Features & 0x80) /* Protection bits supported */
203 		features |= (1<<6);
204 
205 	extp->FeatureSupport = features;
206 
207 	/* burst write mode not supported */
208 	cfi->cfiq->BufWriteTimeoutTyp = 0;
209 	cfi->cfiq->BufWriteTimeoutMax = 0;
210 }
211 
212 static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
213 {
214 	struct map_info *map = mtd->priv;
215 	struct cfi_private *cfi = map->fldrv_priv;
216 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
217 
218 	cfip->FeatureSupport |= (1 << 5);
219 	mtd->flags |= MTD_POWERUP_LOCK;
220 }
221 
222 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
223 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
224 static void fixup_intel_strataflash(struct mtd_info *mtd)
225 {
226 	struct map_info *map = mtd->priv;
227 	struct cfi_private *cfi = map->fldrv_priv;
228 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
229 
230 	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
231 	                    "erase on write disabled.\n");
232 	extp->SuspendCmdSupport &= ~1;
233 }
234 #endif
235 
236 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
237 static void fixup_no_write_suspend(struct mtd_info *mtd)
238 {
239 	struct map_info *map = mtd->priv;
240 	struct cfi_private *cfi = map->fldrv_priv;
241 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
242 
243 	if (cfip && (cfip->FeatureSupport&4)) {
244 		cfip->FeatureSupport &= ~4;
245 		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
246 	}
247 }
248 #endif
249 
250 static void fixup_st_m28w320ct(struct mtd_info *mtd)
251 {
252 	struct map_info *map = mtd->priv;
253 	struct cfi_private *cfi = map->fldrv_priv;
254 
255 	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
256 	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
257 }
258 
259 static void fixup_st_m28w320cb(struct mtd_info *mtd)
260 {
261 	struct map_info *map = mtd->priv;
262 	struct cfi_private *cfi = map->fldrv_priv;
263 
264 	/* Note this is done after the region info is endian swapped */
265 	cfi->cfiq->EraseRegionInfo[1] =
266 		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
267 };
268 
269 static int is_LH28F640BF(struct cfi_private *cfi)
270 {
271 	/* Sharp LH28F640BF Family */
272 	if (cfi->mfr == CFI_MFR_SHARP && (
273 	    cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
274 	    cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
275 		return 1;
276 	return 0;
277 }
278 
279 static void fixup_LH28F640BF(struct mtd_info *mtd)
280 {
281 	struct map_info *map = mtd->priv;
282 	struct cfi_private *cfi = map->fldrv_priv;
283 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
284 
285 	/* Reset the Partition Configuration Register on LH28F640BF
286 	 * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
287 	if (is_LH28F640BF(cfi)) {
288 		printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
289 		map_write(map, CMD(0x60), 0);
290 		map_write(map, CMD(0x04), 0);
291 
292 		/* We have set one single partition thus
293 		 * Simultaneous Operations are not allowed */
294 		printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
295 		extp->FeatureSupport &= ~512;
296 	}
297 }
298 
299 static void fixup_use_point(struct mtd_info *mtd)
300 {
301 	struct map_info *map = mtd->priv;
302 	if (!mtd->_point && map_is_linear(map)) {
303 		mtd->_point   = cfi_intelext_point;
304 		mtd->_unpoint = cfi_intelext_unpoint;
305 	}
306 }
307 
308 static void fixup_use_write_buffers(struct mtd_info *mtd)
309 {
310 	struct map_info *map = mtd->priv;
311 	struct cfi_private *cfi = map->fldrv_priv;
312 	if (cfi->cfiq->BufWriteTimeoutTyp) {
313 		printk(KERN_INFO "Using buffer write method\n" );
314 		mtd->_write = cfi_intelext_write_buffers;
315 		mtd->_writev = cfi_intelext_writev;
316 	}
317 }
318 
319 /*
320  * Some chips power-up with all sectors locked by default.
321  */
322 static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
323 {
324 	struct map_info *map = mtd->priv;
325 	struct cfi_private *cfi = map->fldrv_priv;
326 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
327 
328 	if (cfip->FeatureSupport&32) {
329 		printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
330 		mtd->flags |= MTD_POWERUP_LOCK;
331 	}
332 }
333 
334 static struct cfi_fixup cfi_fixup_table[] = {
335 	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
336 	{ CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
337 	{ CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
338 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
339 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
340 #endif
341 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
342 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
343 #endif
344 #if !FORCE_WORD_WRITE
345 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
346 #endif
347 	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
348 	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
349 	{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
350 	{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
351 	{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
352 	{ 0, 0, NULL }
353 };
354 
355 static struct cfi_fixup jedec_fixup_table[] = {
356 	{ CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
357 	{ CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
358 	{ CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
359 	{ CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
360 	{ CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
361 	{ 0, 0, NULL }
362 };
363 static struct cfi_fixup fixup_table[] = {
364 	/* The CFI vendor ids and the JEDEC vendor IDs appear
365 	 * to be common.  It is like the devices id's are as
366 	 * well.  This table is to pick all cases where
367 	 * we know that is the case.
368 	 */
369 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
370 	{ 0, 0, NULL }
371 };
372 
373 static void cfi_fixup_major_minor(struct cfi_private *cfi,
374 						struct cfi_pri_intelext *extp)
375 {
376 	if (cfi->mfr == CFI_MFR_INTEL &&
377 			cfi->id == PF38F4476 && extp->MinorVersion == '3')
378 		extp->MinorVersion = '1';
379 }
380 
381 static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
382 {
383 	/*
384 	 * Micron(was Numonyx) 1Gbit bottom boot are buggy w.r.t
385 	 * Erase Supend for their small Erase Blocks(0x8000)
386 	 */
387 	if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
388 		return 1;
389 	return 0;
390 }
391 
392 static inline struct cfi_pri_intelext *
393 read_pri_intelext(struct map_info *map, __u16 adr)
394 {
395 	struct cfi_private *cfi = map->fldrv_priv;
396 	struct cfi_pri_intelext *extp;
397 	unsigned int extra_size = 0;
398 	unsigned int extp_size = sizeof(*extp);
399 
400  again:
401 	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
402 	if (!extp)
403 		return NULL;
404 
405 	cfi_fixup_major_minor(cfi, extp);
406 
407 	if (extp->MajorVersion != '1' ||
408 	    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
409 		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
410 		       "version %c.%c.\n",  extp->MajorVersion,
411 		       extp->MinorVersion);
412 		kfree(extp);
413 		return NULL;
414 	}
415 
416 	/* Do some byteswapping if necessary */
417 	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
418 	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
419 	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
420 
421 	if (extp->MinorVersion >= '0') {
422 		extra_size = 0;
423 
424 		/* Protection Register info */
425 		if (extp->NumProtectionFields)
426 			extra_size += (extp->NumProtectionFields - 1) *
427 				      sizeof(struct cfi_intelext_otpinfo);
428 	}
429 
430 	if (extp->MinorVersion >= '1') {
431 		/* Burst Read info */
432 		extra_size += 2;
433 		if (extp_size < sizeof(*extp) + extra_size)
434 			goto need_more;
435 		extra_size += extp->extra[extra_size - 1];
436 	}
437 
438 	if (extp->MinorVersion >= '3') {
439 		int nb_parts, i;
440 
441 		/* Number of hardware-partitions */
442 		extra_size += 1;
443 		if (extp_size < sizeof(*extp) + extra_size)
444 			goto need_more;
445 		nb_parts = extp->extra[extra_size - 1];
446 
447 		/* skip the sizeof(partregion) field in CFI 1.4 */
448 		if (extp->MinorVersion >= '4')
449 			extra_size += 2;
450 
451 		for (i = 0; i < nb_parts; i++) {
452 			struct cfi_intelext_regioninfo *rinfo;
453 			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
454 			extra_size += sizeof(*rinfo);
455 			if (extp_size < sizeof(*extp) + extra_size)
456 				goto need_more;
457 			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
458 			extra_size += (rinfo->NumBlockTypes - 1)
459 				      * sizeof(struct cfi_intelext_blockinfo);
460 		}
461 
462 		if (extp->MinorVersion >= '4')
463 			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);
464 
465 		if (extp_size < sizeof(*extp) + extra_size) {
466 			need_more:
467 			extp_size = sizeof(*extp) + extra_size;
468 			kfree(extp);
469 			if (extp_size > 4096) {
470 				printk(KERN_ERR
471 					"%s: cfi_pri_intelext is too fat\n",
472 					__func__);
473 				return NULL;
474 			}
475 			goto again;
476 		}
477 	}
478 
479 	return extp;
480 }
481 
482 struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
483 {
484 	struct cfi_private *cfi = map->fldrv_priv;
485 	struct mtd_info *mtd;
486 	int i;
487 
488 	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
489 	if (!mtd)
490 		return NULL;
491 	mtd->priv = map;
492 	mtd->type = MTD_NORFLASH;
493 
494 	/* Fill in the default mtd operations */
495 	mtd->_erase   = cfi_intelext_erase_varsize;
496 	mtd->_read    = cfi_intelext_read;
497 	mtd->_write   = cfi_intelext_write_words;
498 	mtd->_sync    = cfi_intelext_sync;
499 	mtd->_lock    = cfi_intelext_lock;
500 	mtd->_unlock  = cfi_intelext_unlock;
501 	mtd->_is_locked = cfi_intelext_is_locked;
502 	mtd->_suspend = cfi_intelext_suspend;
503 	mtd->_resume  = cfi_intelext_resume;
504 	mtd->flags   = MTD_CAP_NORFLASH;
505 	mtd->name    = map->name;
506 	mtd->writesize = 1;
507 	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
508 
509 	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
510 
511 	if (cfi->cfi_mode == CFI_MODE_CFI) {
512 		/*
513 		 * It's a real CFI chip, not one for which the probe
514 		 * routine faked a CFI structure. So we read the feature
515 		 * table from it.
516 		 */
517 		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
518 		struct cfi_pri_intelext *extp;
519 
520 		extp = read_pri_intelext(map, adr);
521 		if (!extp) {
522 			kfree(mtd);
523 			return NULL;
524 		}
525 
526 		/* Install our own private info structure */
527 		cfi->cmdset_priv = extp;
528 
529 		cfi_fixup(mtd, cfi_fixup_table);
530 
531 #ifdef DEBUG_CFI_FEATURES
532 		/* Tell the user about it in lots of lovely detail */
533 		cfi_tell_features(extp);
534 #endif
535 
536 		if(extp->SuspendCmdSupport & 1) {
537 			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
538 		}
539 	}
540 	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
541 		/* Apply jedec specific fixups */
542 		cfi_fixup(mtd, jedec_fixup_table);
543 	}
544 	/* Apply generic fixups */
545 	cfi_fixup(mtd, fixup_table);
546 
547 	for (i=0; i< cfi->numchips; i++) {
548 		if (cfi->cfiq->WordWriteTimeoutTyp)
549 			cfi->chips[i].word_write_time =
550 				1<<cfi->cfiq->WordWriteTimeoutTyp;
551 		else
552 			cfi->chips[i].word_write_time = 50000;
553 
554 		if (cfi->cfiq->BufWriteTimeoutTyp)
555 			cfi->chips[i].buffer_write_time =
556 				1<<cfi->cfiq->BufWriteTimeoutTyp;
557 		/* No default; if it isn't specified, we won't use it */
558 
559 		if (cfi->cfiq->BlockEraseTimeoutTyp)
560 			cfi->chips[i].erase_time =
561 				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
562 		else
563 			cfi->chips[i].erase_time = 2000000;
564 
565 		if (cfi->cfiq->WordWriteTimeoutTyp &&
566 		    cfi->cfiq->WordWriteTimeoutMax)
567 			cfi->chips[i].word_write_time_max =
568 				1<<(cfi->cfiq->WordWriteTimeoutTyp +
569 				    cfi->cfiq->WordWriteTimeoutMax);
570 		else
571 			cfi->chips[i].word_write_time_max = 50000 * 8;
572 
573 		if (cfi->cfiq->BufWriteTimeoutTyp &&
574 		    cfi->cfiq->BufWriteTimeoutMax)
575 			cfi->chips[i].buffer_write_time_max =
576 				1<<(cfi->cfiq->BufWriteTimeoutTyp +
577 				    cfi->cfiq->BufWriteTimeoutMax);
578 
579 		if (cfi->cfiq->BlockEraseTimeoutTyp &&
580 		    cfi->cfiq->BlockEraseTimeoutMax)
581 			cfi->chips[i].erase_time_max =
582 				1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
583 				       cfi->cfiq->BlockEraseTimeoutMax);
584 		else
585 			cfi->chips[i].erase_time_max = 2000000 * 8;
586 
587 		cfi->chips[i].ref_point_counter = 0;
588 		init_waitqueue_head(&(cfi->chips[i].wq));
589 	}
590 
591 	map->fldrv = &cfi_intelext_chipdrv;
592 
593 	return cfi_intelext_setup(mtd);
594 }
595 struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
596 struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
597 EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
598 EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
599 EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
600 
601 static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
602 {
603 	struct map_info *map = mtd->priv;
604 	struct cfi_private *cfi = map->fldrv_priv;
605 	unsigned long offset = 0;
606 	int i,j;
607 	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
608 
609 	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
610 
611 	mtd->size = devsize * cfi->numchips;
612 
613 	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
614 	mtd->eraseregions = kcalloc(mtd->numeraseregions,
615 				    sizeof(struct mtd_erase_region_info),
616 				    GFP_KERNEL);
617 	if (!mtd->eraseregions)
618 		goto setup_err;
619 
620 	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
621 		unsigned long ernum, ersize;
622 		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
623 		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
624 
625 		if (mtd->erasesize < ersize) {
626 			mtd->erasesize = ersize;
627 		}
628 		for (j=0; j<cfi->numchips; j++) {
629 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
630 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
631 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
632 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
633 			if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
634 				goto setup_err;
635 		}
636 		offset += (ersize * ernum);
637 	}
638 
639 	if (offset != devsize) {
640 		/* Argh */
641 		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
642 		goto setup_err;
643 	}
644 
645 	for (i=0; i<mtd->numeraseregions;i++){
646 		printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
647 		       i,(unsigned long long)mtd->eraseregions[i].offset,
648 		       mtd->eraseregions[i].erasesize,
649 		       mtd->eraseregions[i].numblocks);
650 	}
651 
652 #ifdef CONFIG_MTD_OTP
653 	mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
654 	mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
655 	mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
656 	mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
657 	mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
658 	mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
659 #endif
660 
661 	/* This function has the potential to distort the reality
662 	   a bit and therefore should be called last. */
663 	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
664 		goto setup_err;
665 
666 	__module_get(THIS_MODULE);
667 	register_reboot_notifier(&mtd->reboot_notifier);
668 	return mtd;
669 
670  setup_err:
671 	if (mtd->eraseregions)
672 		for (i=0; i<cfi->cfiq->NumEraseRegions; i++)
673 			for (j=0; j<cfi->numchips; j++)
674 				kfree(mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap);
675 	kfree(mtd->eraseregions);
676 	kfree(mtd);
677 	kfree(cfi->cmdset_priv);
678 	return NULL;
679 }
680 
681 static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
682 					struct cfi_private **pcfi)
683 {
684 	struct map_info *map = mtd->priv;
685 	struct cfi_private *cfi = *pcfi;
686 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
687 
688 	/*
689 	 * Probing of multi-partition flash chips.
690 	 *
691 	 * To support multiple partitions when available, we simply arrange
692 	 * for each of them to have their own flchip structure even if they
693 	 * are on the same physical chip.  This means completely recreating
694 	 * a new cfi_private structure right here which is a blatent code
695 	 * layering violation, but this is still the least intrusive
696 	 * arrangement at this point. This can be rearranged in the future
697 	 * if someone feels motivated enough.  --nico
698 	 */
699 	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
700 	    && extp->FeatureSupport & (1 << 9)) {
701 		int offs = 0;
702 		struct cfi_private *newcfi;
703 		struct flchip *chip;
704 		struct flchip_shared *shared;
705 		int numregions, numparts, partshift, numvirtchips, i, j;
706 
707 		/* Protection Register info */
708 		if (extp->NumProtectionFields)
709 			offs = (extp->NumProtectionFields - 1) *
710 			       sizeof(struct cfi_intelext_otpinfo);
711 
712 		/* Burst Read info */
713 		offs += extp->extra[offs+1]+2;
714 
715 		/* Number of partition regions */
716 		numregions = extp->extra[offs];
717 		offs += 1;
718 
719 		/* skip the sizeof(partregion) field in CFI 1.4 */
720 		if (extp->MinorVersion >= '4')
721 			offs += 2;
722 
723 		/* Number of hardware partitions */
724 		numparts = 0;
725 		for (i = 0; i < numregions; i++) {
726 			struct cfi_intelext_regioninfo *rinfo;
727 			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
728 			numparts += rinfo->NumIdentPartitions;
729 			offs += sizeof(*rinfo)
730 				+ (rinfo->NumBlockTypes - 1) *
731 				  sizeof(struct cfi_intelext_blockinfo);
732 		}
733 
734 		if (!numparts)
735 			numparts = 1;
736 
737 		/* Programming Region info */
738 		if (extp->MinorVersion >= '4') {
739 			struct cfi_intelext_programming_regioninfo *prinfo;
740 			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
741 			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
742 			mtd->flags &= ~MTD_BIT_WRITEABLE;
743 			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
744 			       map->name, mtd->writesize,
745 			       cfi->interleave * prinfo->ControlValid,
746 			       cfi->interleave * prinfo->ControlInvalid);
747 		}
748 
749 		/*
750 		 * All functions below currently rely on all chips having
751 		 * the same geometry so we'll just assume that all hardware
752 		 * partitions are of the same size too.
753 		 */
754 		partshift = cfi->chipshift - __ffs(numparts);
755 
756 		if ((1 << partshift) < mtd->erasesize) {
757 			printk( KERN_ERR
758 				"%s: bad number of hw partitions (%d)\n",
759 				__func__, numparts);
760 			return -EINVAL;
761 		}
762 
763 		numvirtchips = cfi->numchips * numparts;
764 		newcfi = kmalloc(struct_size(newcfi, chips, numvirtchips),
765 				 GFP_KERNEL);
766 		if (!newcfi)
767 			return -ENOMEM;
768 		shared = kmalloc_array(cfi->numchips,
769 				       sizeof(struct flchip_shared),
770 				       GFP_KERNEL);
771 		if (!shared) {
772 			kfree(newcfi);
773 			return -ENOMEM;
774 		}
775 		memcpy(newcfi, cfi, sizeof(struct cfi_private));
776 		newcfi->numchips = numvirtchips;
777 		newcfi->chipshift = partshift;
778 
779 		chip = &newcfi->chips[0];
780 		for (i = 0; i < cfi->numchips; i++) {
781 			shared[i].writing = shared[i].erasing = NULL;
782 			mutex_init(&shared[i].lock);
783 			for (j = 0; j < numparts; j++) {
784 				*chip = cfi->chips[i];
785 				chip->start += j << partshift;
786 				chip->priv = &shared[i];
787 				/* those should be reset too since
788 				   they create memory references. */
789 				init_waitqueue_head(&chip->wq);
790 				mutex_init(&chip->mutex);
791 				chip++;
792 			}
793 		}
794 
795 		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
796 				  "--> %d partitions of %d KiB\n",
797 				  map->name, cfi->numchips, cfi->interleave,
798 				  newcfi->numchips, 1<<(newcfi->chipshift-10));
799 
800 		map->fldrv_priv = newcfi;
801 		*pcfi = newcfi;
802 		kfree(cfi);
803 	}
804 
805 	return 0;
806 }
807 
808 /*
809  *  *********** CHIP ACCESS FUNCTIONS ***********
810  */
811 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
812 {
813 	DECLARE_WAITQUEUE(wait, current);
814 	struct cfi_private *cfi = map->fldrv_priv;
815 	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
816 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
817 	unsigned long timeo = jiffies + HZ;
818 
819 	/* Prevent setting state FL_SYNCING for chip in suspended state. */
820 	if (mode == FL_SYNCING && chip->oldstate != FL_READY)
821 		goto sleep;
822 
823 	switch (chip->state) {
824 
825 	case FL_STATUS:
826 		for (;;) {
827 			status = map_read(map, adr);
828 			if (map_word_andequal(map, status, status_OK, status_OK))
829 				break;
830 
831 			/* At this point we're fine with write operations
832 			   in other partitions as they don't conflict. */
833 			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
834 				break;
835 
836 			mutex_unlock(&chip->mutex);
837 			cfi_udelay(1);
838 			mutex_lock(&chip->mutex);
839 			/* Someone else might have been playing with it. */
840 			return -EAGAIN;
841 		}
842 		fallthrough;
843 	case FL_READY:
844 	case FL_CFI_QUERY:
845 	case FL_JEDEC_QUERY:
846 		return 0;
847 
848 	case FL_ERASING:
849 		if (!cfip ||
850 		    !(cfip->FeatureSupport & 2) ||
851 		    !(mode == FL_READY || mode == FL_POINT ||
852 		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
853 			goto sleep;
854 
855 		/* Do not allow suspend iff read/write to EB address */
856 		if ((adr & chip->in_progress_block_mask) ==
857 		    chip->in_progress_block_addr)
858 			goto sleep;
859 
860 		/* do not suspend small EBs, buggy Micron Chips */
861 		if (cfi_is_micron_28F00AP30(cfi, chip) &&
862 		    (chip->in_progress_block_mask == ~(0x8000-1)))
863 			goto sleep;
864 
865 		/* Erase suspend */
866 		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
867 
868 		/* If the flash has finished erasing, then 'erase suspend'
869 		 * appears to make some (28F320) flash devices switch to
870 		 * 'read' mode.  Make sure that we switch to 'read status'
871 		 * mode so we get the right data. --rmk
872 		 */
873 		map_write(map, CMD(0x70), chip->in_progress_block_addr);
874 		chip->oldstate = FL_ERASING;
875 		chip->state = FL_ERASE_SUSPENDING;
876 		chip->erase_suspended = 1;
877 		for (;;) {
878 			status = map_read(map, chip->in_progress_block_addr);
879 			if (map_word_andequal(map, status, status_OK, status_OK))
880 			        break;
881 
882 			if (time_after(jiffies, timeo)) {
883 				/* Urgh. Resume and pretend we weren't here.
884 				 * Make sure we're in 'read status' mode if it had finished */
885 				put_chip(map, chip, adr);
886 				printk(KERN_ERR "%s: Chip not ready after erase "
887 				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
888 				return -EIO;
889 			}
890 
891 			mutex_unlock(&chip->mutex);
892 			cfi_udelay(1);
893 			mutex_lock(&chip->mutex);
894 			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
895 			   So we can just loop here. */
896 		}
897 		chip->state = FL_STATUS;
898 		return 0;
899 
900 	case FL_XIP_WHILE_ERASING:
901 		if (mode != FL_READY && mode != FL_POINT &&
902 		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
903 			goto sleep;
904 		chip->oldstate = chip->state;
905 		chip->state = FL_READY;
906 		return 0;
907 
908 	case FL_SHUTDOWN:
909 		/* The machine is rebooting now,so no one can get chip anymore */
910 		return -EIO;
911 	case FL_POINT:
912 		/* Only if there's no operation suspended... */
913 		if (mode == FL_READY && chip->oldstate == FL_READY)
914 			return 0;
915 		fallthrough;
916 	default:
917 	sleep:
918 		set_current_state(TASK_UNINTERRUPTIBLE);
919 		add_wait_queue(&chip->wq, &wait);
920 		mutex_unlock(&chip->mutex);
921 		schedule();
922 		remove_wait_queue(&chip->wq, &wait);
923 		mutex_lock(&chip->mutex);
924 		return -EAGAIN;
925 	}
926 }
927 
928 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
929 {
930 	int ret;
931 	DECLARE_WAITQUEUE(wait, current);
932 
933  retry:
934 	if (chip->priv &&
935 	    (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
936 	    || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
937 		/*
938 		 * OK. We have possibility for contention on the write/erase
939 		 * operations which are global to the real chip and not per
940 		 * partition.  So let's fight it over in the partition which
941 		 * currently has authority on the operation.
942 		 *
943 		 * The rules are as follows:
944 		 *
945 		 * - any write operation must own shared->writing.
946 		 *
947 		 * - any erase operation must own _both_ shared->writing and
948 		 *   shared->erasing.
949 		 *
950 		 * - contention arbitration is handled in the owner's context.
951 		 *
952 		 * The 'shared' struct can be read and/or written only when
953 		 * its lock is taken.
954 		 */
955 		struct flchip_shared *shared = chip->priv;
956 		struct flchip *contender;
957 		mutex_lock(&shared->lock);
958 		contender = shared->writing;
959 		if (contender && contender != chip) {
960 			/*
961 			 * The engine to perform desired operation on this
962 			 * partition is already in use by someone else.
963 			 * Let's fight over it in the context of the chip
964 			 * currently using it.  If it is possible to suspend,
965 			 * that other partition will do just that, otherwise
966 			 * it'll happily send us to sleep.  In any case, when
967 			 * get_chip returns success we're clear to go ahead.
968 			 */
969 			ret = mutex_trylock(&contender->mutex);
970 			mutex_unlock(&shared->lock);
971 			if (!ret)
972 				goto retry;
973 			mutex_unlock(&chip->mutex);
974 			ret = chip_ready(map, contender, contender->start, mode);
975 			mutex_lock(&chip->mutex);
976 
977 			if (ret == -EAGAIN) {
978 				mutex_unlock(&contender->mutex);
979 				goto retry;
980 			}
981 			if (ret) {
982 				mutex_unlock(&contender->mutex);
983 				return ret;
984 			}
985 			mutex_lock(&shared->lock);
986 
987 			/* We should not own chip if it is already
988 			 * in FL_SYNCING state. Put contender and retry. */
989 			if (chip->state == FL_SYNCING) {
990 				put_chip(map, contender, contender->start);
991 				mutex_unlock(&contender->mutex);
992 				goto retry;
993 			}
994 			mutex_unlock(&contender->mutex);
995 		}
996 
997 		/* Check if we already have suspended erase
998 		 * on this chip. Sleep. */
999 		if (mode == FL_ERASING && shared->erasing
1000 		    && shared->erasing->oldstate == FL_ERASING) {
1001 			mutex_unlock(&shared->lock);
1002 			set_current_state(TASK_UNINTERRUPTIBLE);
1003 			add_wait_queue(&chip->wq, &wait);
1004 			mutex_unlock(&chip->mutex);
1005 			schedule();
1006 			remove_wait_queue(&chip->wq, &wait);
1007 			mutex_lock(&chip->mutex);
1008 			goto retry;
1009 		}
1010 
1011 		/* We now own it */
1012 		shared->writing = chip;
1013 		if (mode == FL_ERASING)
1014 			shared->erasing = chip;
1015 		mutex_unlock(&shared->lock);
1016 	}
1017 	ret = chip_ready(map, chip, adr, mode);
1018 	if (ret == -EAGAIN)
1019 		goto retry;
1020 
1021 	return ret;
1022 }
1023 
1024 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
1025 {
1026 	struct cfi_private *cfi = map->fldrv_priv;
1027 
1028 	if (chip->priv) {
1029 		struct flchip_shared *shared = chip->priv;
1030 		mutex_lock(&shared->lock);
1031 		if (shared->writing == chip && chip->oldstate == FL_READY) {
1032 			/* We own the ability to write, but we're done */
1033 			shared->writing = shared->erasing;
1034 			if (shared->writing && shared->writing != chip) {
1035 				/* give back ownership to who we loaned it from */
1036 				struct flchip *loaner = shared->writing;
1037 				mutex_lock(&loaner->mutex);
1038 				mutex_unlock(&shared->lock);
1039 				mutex_unlock(&chip->mutex);
1040 				put_chip(map, loaner, loaner->start);
1041 				mutex_lock(&chip->mutex);
1042 				mutex_unlock(&loaner->mutex);
1043 				wake_up(&chip->wq);
1044 				return;
1045 			}
1046 			shared->erasing = NULL;
1047 			shared->writing = NULL;
1048 		} else if (shared->erasing == chip && shared->writing != chip) {
1049 			/*
1050 			 * We own the ability to erase without the ability
1051 			 * to write, which means the erase was suspended
1052 			 * and some other partition is currently writing.
1053 			 * Don't let the switch below mess things up since
1054 			 * we don't have ownership to resume anything.
1055 			 */
1056 			mutex_unlock(&shared->lock);
1057 			wake_up(&chip->wq);
1058 			return;
1059 		}
1060 		mutex_unlock(&shared->lock);
1061 	}
1062 
1063 	switch(chip->oldstate) {
1064 	case FL_ERASING:
1065 		/* What if one interleaved chip has finished and the
1066 		   other hasn't? The old code would leave the finished
1067 		   one in READY mode. That's bad, and caused -EROFS
1068 		   errors to be returned from do_erase_oneblock because
1069 		   that's the only bit it checked for at the time.
1070 		   As the state machine appears to explicitly allow
1071 		   sending the 0x70 (Read Status) command to an erasing
1072 		   chip and expecting it to be ignored, that's what we
1073 		   do. */
1074 		map_write(map, CMD(0xd0), chip->in_progress_block_addr);
1075 		map_write(map, CMD(0x70), chip->in_progress_block_addr);
1076 		chip->oldstate = FL_READY;
1077 		chip->state = FL_ERASING;
1078 		break;
1079 
1080 	case FL_XIP_WHILE_ERASING:
1081 		chip->state = chip->oldstate;
1082 		chip->oldstate = FL_READY;
1083 		break;
1084 
1085 	case FL_READY:
1086 	case FL_STATUS:
1087 	case FL_JEDEC_QUERY:
1088 		break;
1089 	default:
1090 		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
1091 	}
1092 	wake_up(&chip->wq);
1093 }
1094 
1095 #ifdef CONFIG_MTD_XIP
1096 
1097 /*
1098  * No interrupt what so ever can be serviced while the flash isn't in array
1099  * mode.  This is ensured by the xip_disable() and xip_enable() functions
1100  * enclosing any code path where the flash is known not to be in array mode.
1101  * And within a XIP disabled code path, only functions marked with __xipram
1102  * may be called and nothing else (it's a good thing to inspect generated
1103  * assembly to make sure inline functions were actually inlined and that gcc
1104  * didn't emit calls to its own support functions). Also configuring MTD CFI
1105  * support to a single buswidth and a single interleave is also recommended.
1106  */
1107 
1108 static void xip_disable(struct map_info *map, struct flchip *chip,
1109 			unsigned long adr)
1110 {
1111 	/* TODO: chips with no XIP use should ignore and return */
1112 	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
1113 	local_irq_disable();
1114 }
1115 
1116 static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
1117 				unsigned long adr)
1118 {
1119 	struct cfi_private *cfi = map->fldrv_priv;
1120 	if (chip->state != FL_POINT && chip->state != FL_READY) {
1121 		map_write(map, CMD(0xff), adr);
1122 		chip->state = FL_READY;
1123 	}
1124 	(void) map_read(map, adr);
1125 	xip_iprefetch();
1126 	local_irq_enable();
1127 }
1128 
1129 /*
1130  * When a delay is required for the flash operation to complete, the
1131  * xip_wait_for_operation() function is polling for both the given timeout
1132  * and pending (but still masked) hardware interrupts.  Whenever there is an
1133  * interrupt pending then the flash erase or write operation is suspended,
1134  * array mode restored and interrupts unmasked.  Task scheduling might also
1135  * happen at that point.  The CPU eventually returns from the interrupt or
1136  * the call to schedule() and the suspended flash operation is resumed for
1137  * the remaining of the delay period.
1138  *
1139  * Warning: this function _will_ fool interrupt latency tracing tools.
1140  */
1141 
1142 static int __xipram xip_wait_for_operation(
1143 		struct map_info *map, struct flchip *chip,
1144 		unsigned long adr, unsigned int chip_op_time_max)
1145 {
1146 	struct cfi_private *cfi = map->fldrv_priv;
1147 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
1148 	map_word status, OK = CMD(0x80);
1149 	unsigned long usec, suspended, start, done;
1150 	flstate_t oldstate, newstate;
1151 
1152        	start = xip_currtime();
1153 	usec = chip_op_time_max;
1154 	if (usec == 0)
1155 		usec = 500000;
1156 	done = 0;
1157 
1158 	do {
1159 		cpu_relax();
1160 		if (xip_irqpending() && cfip &&
1161 		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
1162 		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
1163 		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
1164 			/*
1165 			 * Let's suspend the erase or write operation when
1166 			 * supported.  Note that we currently don't try to
1167 			 * suspend interleaved chips if there is already
1168 			 * another operation suspended (imagine what happens
1169 			 * when one chip was already done with the current
1170 			 * operation while another chip suspended it, then
1171 			 * we resume the whole thing at once).  Yes, it
1172 			 * can happen!
1173 			 */
1174 			usec -= done;
1175 			map_write(map, CMD(0xb0), adr);
1176 			map_write(map, CMD(0x70), adr);
1177 			suspended = xip_currtime();
1178 			do {
1179 				if (xip_elapsed_since(suspended) > 100000) {
1180 					/*
1181 					 * The chip doesn't want to suspend
1182 					 * after waiting for 100 msecs.
1183 					 * This is a critical error but there
1184 					 * is not much we can do here.
1185 					 */
1186 					return -EIO;
1187 				}
1188 				status = map_read(map, adr);
1189 			} while (!map_word_andequal(map, status, OK, OK));
1190 
1191 			/* Suspend succeeded */
1192 			oldstate = chip->state;
1193 			if (oldstate == FL_ERASING) {
1194 				if (!map_word_bitsset(map, status, CMD(0x40)))
1195 					break;
1196 				newstate = FL_XIP_WHILE_ERASING;
1197 				chip->erase_suspended = 1;
1198 			} else {
1199 				if (!map_word_bitsset(map, status, CMD(0x04)))
1200 					break;
1201 				newstate = FL_XIP_WHILE_WRITING;
1202 				chip->write_suspended = 1;
1203 			}
1204 			chip->state = newstate;
1205 			map_write(map, CMD(0xff), adr);
1206 			(void) map_read(map, adr);
1207 			xip_iprefetch();
1208 			local_irq_enable();
1209 			mutex_unlock(&chip->mutex);
1210 			xip_iprefetch();
1211 			cond_resched();
1212 
1213 			/*
1214 			 * We're back.  However someone else might have
1215 			 * decided to go write to the chip if we are in
1216 			 * a suspended erase state.  If so let's wait
1217 			 * until it's done.
1218 			 */
1219 			mutex_lock(&chip->mutex);
1220 			while (chip->state != newstate) {
1221 				DECLARE_WAITQUEUE(wait, current);
1222 				set_current_state(TASK_UNINTERRUPTIBLE);
1223 				add_wait_queue(&chip->wq, &wait);
1224 				mutex_unlock(&chip->mutex);
1225 				schedule();
1226 				remove_wait_queue(&chip->wq, &wait);
1227 				mutex_lock(&chip->mutex);
1228 			}
1229 			/* Disallow XIP again */
1230 			local_irq_disable();
1231 
1232 			/* Resume the write or erase operation */
1233 			map_write(map, CMD(0xd0), adr);
1234 			map_write(map, CMD(0x70), adr);
1235 			chip->state = oldstate;
1236 			start = xip_currtime();
1237 		} else if (usec >= 1000000/HZ) {
1238 			/*
1239 			 * Try to save on CPU power when waiting delay
1240 			 * is at least a system timer tick period.
1241 			 * No need to be extremely accurate here.
1242 			 */
1243 			xip_cpu_idle();
1244 		}
1245 		status = map_read(map, adr);
1246 		done = xip_elapsed_since(start);
1247 	} while (!map_word_andequal(map, status, OK, OK)
1248 		 && done < usec);
1249 
1250 	return (done >= usec) ? -ETIME : 0;
1251 }
1252 
1253 /*
1254  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1255  * the flash is actively programming or erasing since we have to poll for
1256  * the operation to complete anyway.  We can't do that in a generic way with
1257  * a XIP setup so do it before the actual flash operation in this case
1258  * and stub it out from INVAL_CACHE_AND_WAIT.
1259  */
1260 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1261 	INVALIDATE_CACHED_RANGE(map, from, size)
1262 
1263 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1264 	xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1265 
1266 #else
1267 
1268 #define xip_disable(map, chip, adr)
1269 #define xip_enable(map, chip, adr)
1270 #define XIP_INVAL_CACHED_RANGE(x...)
1271 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1272 
1273 static int inval_cache_and_wait_for_operation(
1274 		struct map_info *map, struct flchip *chip,
1275 		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1276 		unsigned int chip_op_time, unsigned int chip_op_time_max)
1277 {
1278 	struct cfi_private *cfi = map->fldrv_priv;
1279 	map_word status, status_OK = CMD(0x80);
1280 	int chip_state = chip->state;
1281 	unsigned int timeo, sleep_time, reset_timeo;
1282 
1283 	mutex_unlock(&chip->mutex);
1284 	if (inval_len)
1285 		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1286 	mutex_lock(&chip->mutex);
1287 
1288 	timeo = chip_op_time_max;
1289 	if (!timeo)
1290 		timeo = 500000;
1291 	reset_timeo = timeo;
1292 	sleep_time = chip_op_time / 2;
1293 
1294 	for (;;) {
1295 		if (chip->state != chip_state) {
1296 			/* Someone's suspended the operation: sleep */
1297 			DECLARE_WAITQUEUE(wait, current);
1298 			set_current_state(TASK_UNINTERRUPTIBLE);
1299 			add_wait_queue(&chip->wq, &wait);
1300 			mutex_unlock(&chip->mutex);
1301 			schedule();
1302 			remove_wait_queue(&chip->wq, &wait);
1303 			mutex_lock(&chip->mutex);
1304 			continue;
1305 		}
1306 
1307 		status = map_read(map, cmd_adr);
1308 		if (map_word_andequal(map, status, status_OK, status_OK))
1309 			break;
1310 
1311 		if (chip->erase_suspended && chip_state == FL_ERASING)  {
1312 			/* Erase suspend occurred while sleep: reset timeout */
1313 			timeo = reset_timeo;
1314 			chip->erase_suspended = 0;
1315 		}
1316 		if (chip->write_suspended && chip_state == FL_WRITING)  {
1317 			/* Write suspend occurred while sleep: reset timeout */
1318 			timeo = reset_timeo;
1319 			chip->write_suspended = 0;
1320 		}
1321 		if (!timeo) {
1322 			map_write(map, CMD(0x70), cmd_adr);
1323 			chip->state = FL_STATUS;
1324 			return -ETIME;
1325 		}
1326 
1327 		/* OK Still waiting. Drop the lock, wait a while and retry. */
1328 		mutex_unlock(&chip->mutex);
1329 		if (sleep_time >= 1000000/HZ) {
1330 			/*
1331 			 * Half of the normal delay still remaining
1332 			 * can be performed with a sleeping delay instead
1333 			 * of busy waiting.
1334 			 */
1335 			msleep(sleep_time/1000);
1336 			timeo -= sleep_time;
1337 			sleep_time = 1000000/HZ;
1338 		} else {
1339 			udelay(1);
1340 			cond_resched();
1341 			timeo--;
1342 		}
1343 		mutex_lock(&chip->mutex);
1344 	}
1345 
1346 	/* Done and happy. */
1347  	chip->state = FL_STATUS;
1348 	return 0;
1349 }
1350 
1351 #endif
1352 
1353 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1354 	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
1355 
1356 
1357 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1358 {
1359 	unsigned long cmd_addr;
1360 	struct cfi_private *cfi = map->fldrv_priv;
1361 	int ret;
1362 
1363 	adr += chip->start;
1364 
1365 	/* Ensure cmd read/writes are aligned. */
1366 	cmd_addr = adr & ~(map_bankwidth(map)-1);
1367 
1368 	mutex_lock(&chip->mutex);
1369 
1370 	ret = get_chip(map, chip, cmd_addr, FL_POINT);
1371 
1372 	if (!ret) {
1373 		if (chip->state != FL_POINT && chip->state != FL_READY)
1374 			map_write(map, CMD(0xff), cmd_addr);
1375 
1376 		chip->state = FL_POINT;
1377 		chip->ref_point_counter++;
1378 	}
1379 	mutex_unlock(&chip->mutex);
1380 
1381 	return ret;
1382 }
1383 
1384 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1385 		size_t *retlen, void **virt, resource_size_t *phys)
1386 {
1387 	struct map_info *map = mtd->priv;
1388 	struct cfi_private *cfi = map->fldrv_priv;
1389 	unsigned long ofs, last_end = 0;
1390 	int chipnum;
1391 	int ret;
1392 
1393 	if (!map->virt)
1394 		return -EINVAL;
1395 
1396 	/* Now lock the chip(s) to POINT state */
1397 
1398 	/* ofs: offset within the first chip that the first read should start */
1399 	chipnum = (from >> cfi->chipshift);
1400 	ofs = from - (chipnum << cfi->chipshift);
1401 
1402 	*virt = map->virt + cfi->chips[chipnum].start + ofs;
1403 	if (phys)
1404 		*phys = map->phys + cfi->chips[chipnum].start + ofs;
1405 
1406 	while (len) {
1407 		unsigned long thislen;
1408 
1409 		if (chipnum >= cfi->numchips)
1410 			break;
1411 
1412 		/* We cannot point across chips that are virtually disjoint */
1413 		if (!last_end)
1414 			last_end = cfi->chips[chipnum].start;
1415 		else if (cfi->chips[chipnum].start != last_end)
1416 			break;
1417 
1418 		if ((len + ofs -1) >> cfi->chipshift)
1419 			thislen = (1<<cfi->chipshift) - ofs;
1420 		else
1421 			thislen = len;
1422 
1423 		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1424 		if (ret)
1425 			break;
1426 
1427 		*retlen += thislen;
1428 		len -= thislen;
1429 
1430 		ofs = 0;
1431 		last_end += 1 << cfi->chipshift;
1432 		chipnum++;
1433 	}
1434 	return 0;
1435 }
1436 
1437 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1438 {
1439 	struct map_info *map = mtd->priv;
1440 	struct cfi_private *cfi = map->fldrv_priv;
1441 	unsigned long ofs;
1442 	int chipnum, err = 0;
1443 
1444 	/* Now unlock the chip(s) POINT state */
1445 
1446 	/* ofs: offset within the first chip that the first read should start */
1447 	chipnum = (from >> cfi->chipshift);
1448 	ofs = from - (chipnum <<  cfi->chipshift);
1449 
1450 	while (len && !err) {
1451 		unsigned long thislen;
1452 		struct flchip *chip;
1453 
1454 		chip = &cfi->chips[chipnum];
1455 		if (chipnum >= cfi->numchips)
1456 			break;
1457 
1458 		if ((len + ofs -1) >> cfi->chipshift)
1459 			thislen = (1<<cfi->chipshift) - ofs;
1460 		else
1461 			thislen = len;
1462 
1463 		mutex_lock(&chip->mutex);
1464 		if (chip->state == FL_POINT) {
1465 			chip->ref_point_counter--;
1466 			if(chip->ref_point_counter == 0)
1467 				chip->state = FL_READY;
1468 		} else {
1469 			printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name);
1470 			err = -EINVAL;
1471 		}
1472 
1473 		put_chip(map, chip, chip->start);
1474 		mutex_unlock(&chip->mutex);
1475 
1476 		len -= thislen;
1477 		ofs = 0;
1478 		chipnum++;
1479 	}
1480 
1481 	return err;
1482 }
1483 
1484 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1485 {
1486 	unsigned long cmd_addr;
1487 	struct cfi_private *cfi = map->fldrv_priv;
1488 	int ret;
1489 
1490 	adr += chip->start;
1491 
1492 	/* Ensure cmd read/writes are aligned. */
1493 	cmd_addr = adr & ~(map_bankwidth(map)-1);
1494 
1495 	mutex_lock(&chip->mutex);
1496 	ret = get_chip(map, chip, cmd_addr, FL_READY);
1497 	if (ret) {
1498 		mutex_unlock(&chip->mutex);
1499 		return ret;
1500 	}
1501 
1502 	if (chip->state != FL_POINT && chip->state != FL_READY) {
1503 		map_write(map, CMD(0xff), cmd_addr);
1504 
1505 		chip->state = FL_READY;
1506 	}
1507 
1508 	map_copy_from(map, buf, adr, len);
1509 
1510 	put_chip(map, chip, cmd_addr);
1511 
1512 	mutex_unlock(&chip->mutex);
1513 	return 0;
1514 }
1515 
1516 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1517 {
1518 	struct map_info *map = mtd->priv;
1519 	struct cfi_private *cfi = map->fldrv_priv;
1520 	unsigned long ofs;
1521 	int chipnum;
1522 	int ret = 0;
1523 
1524 	/* ofs: offset within the first chip that the first read should start */
1525 	chipnum = (from >> cfi->chipshift);
1526 	ofs = from - (chipnum <<  cfi->chipshift);
1527 
1528 	while (len) {
1529 		unsigned long thislen;
1530 
1531 		if (chipnum >= cfi->numchips)
1532 			break;
1533 
1534 		if ((len + ofs -1) >> cfi->chipshift)
1535 			thislen = (1<<cfi->chipshift) - ofs;
1536 		else
1537 			thislen = len;
1538 
1539 		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1540 		if (ret)
1541 			break;
1542 
1543 		*retlen += thislen;
1544 		len -= thislen;
1545 		buf += thislen;
1546 
1547 		ofs = 0;
1548 		chipnum++;
1549 	}
1550 	return ret;
1551 }
1552 
1553 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1554 				     unsigned long adr, map_word datum, int mode)
1555 {
1556 	struct cfi_private *cfi = map->fldrv_priv;
1557 	map_word status, write_cmd;
1558 	int ret;
1559 
1560 	adr += chip->start;
1561 
1562 	switch (mode) {
1563 	case FL_WRITING:
1564 		write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
1565 		break;
1566 	case FL_OTP_WRITE:
1567 		write_cmd = CMD(0xc0);
1568 		break;
1569 	default:
1570 		return -EINVAL;
1571 	}
1572 
1573 	mutex_lock(&chip->mutex);
1574 	ret = get_chip(map, chip, adr, mode);
1575 	if (ret) {
1576 		mutex_unlock(&chip->mutex);
1577 		return ret;
1578 	}
1579 
1580 	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1581 	ENABLE_VPP(map);
1582 	xip_disable(map, chip, adr);
1583 	map_write(map, write_cmd, adr);
1584 	map_write(map, datum, adr);
1585 	chip->state = mode;
1586 
1587 	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1588 				   adr, map_bankwidth(map),
1589 				   chip->word_write_time,
1590 				   chip->word_write_time_max);
1591 	if (ret) {
1592 		xip_enable(map, chip, adr);
1593 		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1594 		goto out;
1595 	}
1596 
1597 	/* check for errors */
1598 	status = map_read(map, adr);
1599 	if (map_word_bitsset(map, status, CMD(0x1a))) {
1600 		unsigned long chipstatus = MERGESTATUS(status);
1601 
1602 		/* reset status */
1603 		map_write(map, CMD(0x50), adr);
1604 		map_write(map, CMD(0x70), adr);
1605 		xip_enable(map, chip, adr);
1606 
1607 		if (chipstatus & 0x02) {
1608 			ret = -EROFS;
1609 		} else if (chipstatus & 0x08) {
1610 			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1611 			ret = -EIO;
1612 		} else {
1613 			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1614 			ret = -EINVAL;
1615 		}
1616 
1617 		goto out;
1618 	}
1619 
1620 	xip_enable(map, chip, adr);
1621  out:	DISABLE_VPP(map);
1622 	put_chip(map, chip, adr);
1623 	mutex_unlock(&chip->mutex);
1624 	return ret;
1625 }
1626 
1627 
1628 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1629 {
1630 	struct map_info *map = mtd->priv;
1631 	struct cfi_private *cfi = map->fldrv_priv;
1632 	int ret;
1633 	int chipnum;
1634 	unsigned long ofs;
1635 
1636 	chipnum = to >> cfi->chipshift;
1637 	ofs = to  - (chipnum << cfi->chipshift);
1638 
1639 	/* If it's not bus-aligned, do the first byte write */
1640 	if (ofs & (map_bankwidth(map)-1)) {
1641 		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1642 		int gap = ofs - bus_ofs;
1643 		int n;
1644 		map_word datum;
1645 
1646 		n = min_t(int, len, map_bankwidth(map)-gap);
1647 		datum = map_word_ff(map);
1648 		datum = map_word_load_partial(map, datum, buf, gap, n);
1649 
1650 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1651 					       bus_ofs, datum, FL_WRITING);
1652 		if (ret)
1653 			return ret;
1654 
1655 		len -= n;
1656 		ofs += n;
1657 		buf += n;
1658 		(*retlen) += n;
1659 
1660 		if (ofs >> cfi->chipshift) {
1661 			chipnum ++;
1662 			ofs = 0;
1663 			if (chipnum == cfi->numchips)
1664 				return 0;
1665 		}
1666 	}
1667 
1668 	while(len >= map_bankwidth(map)) {
1669 		map_word datum = map_word_load(map, buf);
1670 
1671 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1672 				       ofs, datum, FL_WRITING);
1673 		if (ret)
1674 			return ret;
1675 
1676 		ofs += map_bankwidth(map);
1677 		buf += map_bankwidth(map);
1678 		(*retlen) += map_bankwidth(map);
1679 		len -= map_bankwidth(map);
1680 
1681 		if (ofs >> cfi->chipshift) {
1682 			chipnum ++;
1683 			ofs = 0;
1684 			if (chipnum == cfi->numchips)
1685 				return 0;
1686 		}
1687 	}
1688 
1689 	if (len & (map_bankwidth(map)-1)) {
1690 		map_word datum;
1691 
1692 		datum = map_word_ff(map);
1693 		datum = map_word_load_partial(map, datum, buf, 0, len);
1694 
1695 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1696 				       ofs, datum, FL_WRITING);
1697 		if (ret)
1698 			return ret;
1699 
1700 		(*retlen) += len;
1701 	}
1702 
1703 	return 0;
1704 }
1705 
1706 
1707 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1708 				    unsigned long adr, const struct kvec **pvec,
1709 				    unsigned long *pvec_seek, int len)
1710 {
1711 	struct cfi_private *cfi = map->fldrv_priv;
1712 	map_word status, write_cmd, datum;
1713 	unsigned long cmd_adr;
1714 	int ret, wbufsize, word_gap, words;
1715 	const struct kvec *vec;
1716 	unsigned long vec_seek;
1717 	unsigned long initial_adr;
1718 	int initial_len = len;
1719 
1720 	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1721 	adr += chip->start;
1722 	initial_adr = adr;
1723 	cmd_adr = adr & ~(wbufsize-1);
1724 
1725 	/* Sharp LH28F640BF chips need the first address for the
1726 	 * Page Buffer Program command. See Table 5 of
1727 	 * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
1728 	if (is_LH28F640BF(cfi))
1729 		cmd_adr = adr;
1730 
1731 	/* Let's determine this according to the interleave only once */
1732 	write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1733 
1734 	mutex_lock(&chip->mutex);
1735 	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1736 	if (ret) {
1737 		mutex_unlock(&chip->mutex);
1738 		return ret;
1739 	}
1740 
1741 	XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1742 	ENABLE_VPP(map);
1743 	xip_disable(map, chip, cmd_adr);
1744 
1745 	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1746 	   [...], the device will not accept any more Write to Buffer commands".
1747 	   So we must check here and reset those bits if they're set. Otherwise
1748 	   we're just pissing in the wind */
1749 	if (chip->state != FL_STATUS) {
1750 		map_write(map, CMD(0x70), cmd_adr);
1751 		chip->state = FL_STATUS;
1752 	}
1753 	status = map_read(map, cmd_adr);
1754 	if (map_word_bitsset(map, status, CMD(0x30))) {
1755 		xip_enable(map, chip, cmd_adr);
1756 		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1757 		xip_disable(map, chip, cmd_adr);
1758 		map_write(map, CMD(0x50), cmd_adr);
1759 		map_write(map, CMD(0x70), cmd_adr);
1760 	}
1761 
1762 	chip->state = FL_WRITING_TO_BUFFER;
1763 	map_write(map, write_cmd, cmd_adr);
1764 	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1765 	if (ret) {
1766 		/* Argh. Not ready for write to buffer */
1767 		map_word Xstatus = map_read(map, cmd_adr);
1768 		map_write(map, CMD(0x70), cmd_adr);
1769 		chip->state = FL_STATUS;
1770 		status = map_read(map, cmd_adr);
1771 		map_write(map, CMD(0x50), cmd_adr);
1772 		map_write(map, CMD(0x70), cmd_adr);
1773 		xip_enable(map, chip, cmd_adr);
1774 		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1775 				map->name, Xstatus.x[0], status.x[0]);
1776 		goto out;
1777 	}
1778 
1779 	/* Figure out the number of words to write */
1780 	word_gap = (-adr & (map_bankwidth(map)-1));
1781 	words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1782 	if (!word_gap) {
1783 		words--;
1784 	} else {
1785 		word_gap = map_bankwidth(map) - word_gap;
1786 		adr -= word_gap;
1787 		datum = map_word_ff(map);
1788 	}
1789 
1790 	/* Write length of data to come */
1791 	map_write(map, CMD(words), cmd_adr );
1792 
1793 	/* Write data */
1794 	vec = *pvec;
1795 	vec_seek = *pvec_seek;
1796 	do {
1797 		int n = map_bankwidth(map) - word_gap;
1798 		if (n > vec->iov_len - vec_seek)
1799 			n = vec->iov_len - vec_seek;
1800 		if (n > len)
1801 			n = len;
1802 
1803 		if (!word_gap && len < map_bankwidth(map))
1804 			datum = map_word_ff(map);
1805 
1806 		datum = map_word_load_partial(map, datum,
1807 					      vec->iov_base + vec_seek,
1808 					      word_gap, n);
1809 
1810 		len -= n;
1811 		word_gap += n;
1812 		if (!len || word_gap == map_bankwidth(map)) {
1813 			map_write(map, datum, adr);
1814 			adr += map_bankwidth(map);
1815 			word_gap = 0;
1816 		}
1817 
1818 		vec_seek += n;
1819 		if (vec_seek == vec->iov_len) {
1820 			vec++;
1821 			vec_seek = 0;
1822 		}
1823 	} while (len);
1824 	*pvec = vec;
1825 	*pvec_seek = vec_seek;
1826 
1827 	/* GO GO GO */
1828 	map_write(map, CMD(0xd0), cmd_adr);
1829 	chip->state = FL_WRITING;
1830 
1831 	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1832 				   initial_adr, initial_len,
1833 				   chip->buffer_write_time,
1834 				   chip->buffer_write_time_max);
1835 	if (ret) {
1836 		map_write(map, CMD(0x70), cmd_adr);
1837 		chip->state = FL_STATUS;
1838 		xip_enable(map, chip, cmd_adr);
1839 		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1840 		goto out;
1841 	}
1842 
1843 	/* check for errors */
1844 	status = map_read(map, cmd_adr);
1845 	if (map_word_bitsset(map, status, CMD(0x1a))) {
1846 		unsigned long chipstatus = MERGESTATUS(status);
1847 
1848 		/* reset status */
1849 		map_write(map, CMD(0x50), cmd_adr);
1850 		map_write(map, CMD(0x70), cmd_adr);
1851 		xip_enable(map, chip, cmd_adr);
1852 
1853 		if (chipstatus & 0x02) {
1854 			ret = -EROFS;
1855 		} else if (chipstatus & 0x08) {
1856 			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1857 			ret = -EIO;
1858 		} else {
1859 			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1860 			ret = -EINVAL;
1861 		}
1862 
1863 		goto out;
1864 	}
1865 
1866 	xip_enable(map, chip, cmd_adr);
1867  out:	DISABLE_VPP(map);
1868 	put_chip(map, chip, cmd_adr);
1869 	mutex_unlock(&chip->mutex);
1870 	return ret;
1871 }
1872 
1873 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1874 				unsigned long count, loff_t to, size_t *retlen)
1875 {
1876 	struct map_info *map = mtd->priv;
1877 	struct cfi_private *cfi = map->fldrv_priv;
1878 	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1879 	int ret;
1880 	int chipnum;
1881 	unsigned long ofs, vec_seek, i;
1882 	size_t len = 0;
1883 
1884 	for (i = 0; i < count; i++)
1885 		len += vecs[i].iov_len;
1886 
1887 	if (!len)
1888 		return 0;
1889 
1890 	chipnum = to >> cfi->chipshift;
1891 	ofs = to - (chipnum << cfi->chipshift);
1892 	vec_seek = 0;
1893 
1894 	do {
1895 		/* We must not cross write block boundaries */
1896 		int size = wbufsize - (ofs & (wbufsize-1));
1897 
1898 		if (size > len)
1899 			size = len;
1900 		ret = do_write_buffer(map, &cfi->chips[chipnum],
1901 				      ofs, &vecs, &vec_seek, size);
1902 		if (ret)
1903 			return ret;
1904 
1905 		ofs += size;
1906 		(*retlen) += size;
1907 		len -= size;
1908 
1909 		if (ofs >> cfi->chipshift) {
1910 			chipnum ++;
1911 			ofs = 0;
1912 			if (chipnum == cfi->numchips)
1913 				return 0;
1914 		}
1915 
1916 		/* Be nice and reschedule with the chip in a usable state for other
1917 		   processes. */
1918 		cond_resched();
1919 
1920 	} while (len);
1921 
1922 	return 0;
1923 }
1924 
1925 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1926 				       size_t len, size_t *retlen, const u_char *buf)
1927 {
1928 	struct kvec vec;
1929 
1930 	vec.iov_base = (void *) buf;
1931 	vec.iov_len = len;
1932 
1933 	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1934 }
1935 
1936 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1937 				      unsigned long adr, int len, void *thunk)
1938 {
1939 	struct cfi_private *cfi = map->fldrv_priv;
1940 	map_word status;
1941 	int retries = 3;
1942 	int ret;
1943 
1944 	adr += chip->start;
1945 
1946  retry:
1947 	mutex_lock(&chip->mutex);
1948 	ret = get_chip(map, chip, adr, FL_ERASING);
1949 	if (ret) {
1950 		mutex_unlock(&chip->mutex);
1951 		return ret;
1952 	}
1953 
1954 	XIP_INVAL_CACHED_RANGE(map, adr, len);
1955 	ENABLE_VPP(map);
1956 	xip_disable(map, chip, adr);
1957 
1958 	/* Clear the status register first */
1959 	map_write(map, CMD(0x50), adr);
1960 
1961 	/* Now erase */
1962 	map_write(map, CMD(0x20), adr);
1963 	map_write(map, CMD(0xD0), adr);
1964 	chip->state = FL_ERASING;
1965 	chip->erase_suspended = 0;
1966 	chip->in_progress_block_addr = adr;
1967 	chip->in_progress_block_mask = ~(len - 1);
1968 
1969 	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1970 				   adr, len,
1971 				   chip->erase_time,
1972 				   chip->erase_time_max);
1973 	if (ret) {
1974 		map_write(map, CMD(0x70), adr);
1975 		chip->state = FL_STATUS;
1976 		xip_enable(map, chip, adr);
1977 		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1978 		goto out;
1979 	}
1980 
1981 	/* We've broken this before. It doesn't hurt to be safe */
1982 	map_write(map, CMD(0x70), adr);
1983 	chip->state = FL_STATUS;
1984 	status = map_read(map, adr);
1985 
1986 	/* check for errors */
1987 	if (map_word_bitsset(map, status, CMD(0x3a))) {
1988 		unsigned long chipstatus = MERGESTATUS(status);
1989 
1990 		/* Reset the error bits */
1991 		map_write(map, CMD(0x50), adr);
1992 		map_write(map, CMD(0x70), adr);
1993 		xip_enable(map, chip, adr);
1994 
1995 		if ((chipstatus & 0x30) == 0x30) {
1996 			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1997 			ret = -EINVAL;
1998 		} else if (chipstatus & 0x02) {
1999 			/* Protection bit set */
2000 			ret = -EROFS;
2001 		} else if (chipstatus & 0x8) {
2002 			/* Voltage */
2003 			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
2004 			ret = -EIO;
2005 		} else if (chipstatus & 0x20 && retries--) {
2006 			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
2007 			DISABLE_VPP(map);
2008 			put_chip(map, chip, adr);
2009 			mutex_unlock(&chip->mutex);
2010 			goto retry;
2011 		} else {
2012 			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
2013 			ret = -EIO;
2014 		}
2015 
2016 		goto out;
2017 	}
2018 
2019 	xip_enable(map, chip, adr);
2020  out:	DISABLE_VPP(map);
2021 	put_chip(map, chip, adr);
2022 	mutex_unlock(&chip->mutex);
2023 	return ret;
2024 }
2025 
2026 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2027 {
2028 	return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
2029 				instr->len, NULL);
2030 }
2031 
2032 static void cfi_intelext_sync (struct mtd_info *mtd)
2033 {
2034 	struct map_info *map = mtd->priv;
2035 	struct cfi_private *cfi = map->fldrv_priv;
2036 	int i;
2037 	struct flchip *chip;
2038 	int ret = 0;
2039 
2040 	for (i=0; !ret && i<cfi->numchips; i++) {
2041 		chip = &cfi->chips[i];
2042 
2043 		mutex_lock(&chip->mutex);
2044 		ret = get_chip(map, chip, chip->start, FL_SYNCING);
2045 
2046 		if (!ret) {
2047 			chip->oldstate = chip->state;
2048 			chip->state = FL_SYNCING;
2049 			/* No need to wake_up() on this state change -
2050 			 * as the whole point is that nobody can do anything
2051 			 * with the chip now anyway.
2052 			 */
2053 		}
2054 		mutex_unlock(&chip->mutex);
2055 	}
2056 
2057 	/* Unlock the chips again */
2058 
2059 	for (i--; i >=0; i--) {
2060 		chip = &cfi->chips[i];
2061 
2062 		mutex_lock(&chip->mutex);
2063 
2064 		if (chip->state == FL_SYNCING) {
2065 			chip->state = chip->oldstate;
2066 			chip->oldstate = FL_READY;
2067 			wake_up(&chip->wq);
2068 		}
2069 		mutex_unlock(&chip->mutex);
2070 	}
2071 }
2072 
2073 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2074 						struct flchip *chip,
2075 						unsigned long adr,
2076 						int len, void *thunk)
2077 {
2078 	struct cfi_private *cfi = map->fldrv_priv;
2079 	int status, ofs_factor = cfi->interleave * cfi->device_type;
2080 
2081 	adr += chip->start;
2082 	xip_disable(map, chip, adr+(2*ofs_factor));
2083 	map_write(map, CMD(0x90), adr+(2*ofs_factor));
2084 	chip->state = FL_JEDEC_QUERY;
2085 	status = cfi_read_query(map, adr+(2*ofs_factor));
2086 	xip_enable(map, chip, 0);
2087 	return status;
2088 }
2089 
2090 #ifdef DEBUG_LOCK_BITS
2091 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2092 						struct flchip *chip,
2093 						unsigned long adr,
2094 						int len, void *thunk)
2095 {
2096 	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2097 	       adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2098 	return 0;
2099 }
2100 #endif
2101 
2102 #define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
2103 #define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)
2104 
2105 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2106 				       unsigned long adr, int len, void *thunk)
2107 {
2108 	struct cfi_private *cfi = map->fldrv_priv;
2109 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2110 	int mdelay;
2111 	int ret;
2112 
2113 	adr += chip->start;
2114 
2115 	mutex_lock(&chip->mutex);
2116 	ret = get_chip(map, chip, adr, FL_LOCKING);
2117 	if (ret) {
2118 		mutex_unlock(&chip->mutex);
2119 		return ret;
2120 	}
2121 
2122 	ENABLE_VPP(map);
2123 	xip_disable(map, chip, adr);
2124 
2125 	map_write(map, CMD(0x60), adr);
2126 	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2127 		map_write(map, CMD(0x01), adr);
2128 		chip->state = FL_LOCKING;
2129 	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2130 		map_write(map, CMD(0xD0), adr);
2131 		chip->state = FL_UNLOCKING;
2132 	} else
2133 		BUG();
2134 
2135 	/*
2136 	 * If Instant Individual Block Locking supported then no need
2137 	 * to delay.
2138 	 */
2139 	/*
2140 	 * Unlocking may take up to 1.4 seconds on some Intel flashes. So
2141 	 * lets use a max of 1.5 seconds (1500ms) as timeout.
2142 	 *
2143 	 * See "Clear Block Lock-Bits Time" on page 40 in
2144 	 * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
2145 	 * from February 2003
2146 	 */
2147 	mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
2148 
2149 	ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
2150 	if (ret) {
2151 		map_write(map, CMD(0x70), adr);
2152 		chip->state = FL_STATUS;
2153 		xip_enable(map, chip, adr);
2154 		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
2155 		goto out;
2156 	}
2157 
2158 	xip_enable(map, chip, adr);
2159  out:	DISABLE_VPP(map);
2160 	put_chip(map, chip, adr);
2161 	mutex_unlock(&chip->mutex);
2162 	return ret;
2163 }
2164 
2165 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2166 {
2167 	int ret;
2168 
2169 #ifdef DEBUG_LOCK_BITS
2170 	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2171 	       __func__, ofs, len);
2172 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2173 		ofs, len, NULL);
2174 #endif
2175 
2176 	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2177 		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2178 
2179 #ifdef DEBUG_LOCK_BITS
2180 	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2181 	       __func__, ret);
2182 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2183 		ofs, len, NULL);
2184 #endif
2185 
2186 	return ret;
2187 }
2188 
2189 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2190 {
2191 	int ret;
2192 
2193 #ifdef DEBUG_LOCK_BITS
2194 	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2195 	       __func__, ofs, len);
2196 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2197 		ofs, len, NULL);
2198 #endif
2199 
2200 	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2201 					ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2202 
2203 #ifdef DEBUG_LOCK_BITS
2204 	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2205 	       __func__, ret);
2206 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2207 		ofs, len, NULL);
2208 #endif
2209 
2210 	return ret;
2211 }
2212 
2213 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2214 				  uint64_t len)
2215 {
2216 	return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2217 				ofs, len, NULL) ? 1 : 0;
2218 }
2219 
2220 #ifdef CONFIG_MTD_OTP
2221 
2222 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2223 			u_long data_offset, u_char *buf, u_int size,
2224 			u_long prot_offset, u_int groupno, u_int groupsize);
2225 
2226 static int __xipram
2227 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2228 	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2229 {
2230 	struct cfi_private *cfi = map->fldrv_priv;
2231 	int ret;
2232 
2233 	mutex_lock(&chip->mutex);
2234 	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2235 	if (ret) {
2236 		mutex_unlock(&chip->mutex);
2237 		return ret;
2238 	}
2239 
2240 	/* let's ensure we're not reading back cached data from array mode */
2241 	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2242 
2243 	xip_disable(map, chip, chip->start);
2244 	if (chip->state != FL_JEDEC_QUERY) {
2245 		map_write(map, CMD(0x90), chip->start);
2246 		chip->state = FL_JEDEC_QUERY;
2247 	}
2248 	map_copy_from(map, buf, chip->start + offset, size);
2249 	xip_enable(map, chip, chip->start);
2250 
2251 	/* then ensure we don't keep OTP data in the cache */
2252 	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2253 
2254 	put_chip(map, chip, chip->start);
2255 	mutex_unlock(&chip->mutex);
2256 	return 0;
2257 }
2258 
2259 static int
2260 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2261 	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2262 {
2263 	int ret;
2264 
2265 	while (size) {
2266 		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2267 		int gap = offset - bus_ofs;
2268 		int n = min_t(int, size, map_bankwidth(map)-gap);
2269 		map_word datum = map_word_ff(map);
2270 
2271 		datum = map_word_load_partial(map, datum, buf, gap, n);
2272 		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2273 		if (ret)
2274 			return ret;
2275 
2276 		offset += n;
2277 		buf += n;
2278 		size -= n;
2279 	}
2280 
2281 	return 0;
2282 }
2283 
2284 static int
2285 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2286 	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2287 {
2288 	struct cfi_private *cfi = map->fldrv_priv;
2289 	map_word datum;
2290 
2291 	/* make sure area matches group boundaries */
2292 	if (size != grpsz)
2293 		return -EXDEV;
2294 
2295 	datum = map_word_ff(map);
2296 	datum = map_word_clr(map, datum, CMD(1 << grpno));
2297 	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2298 }
2299 
2300 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2301 				 size_t *retlen, u_char *buf,
2302 				 otp_op_t action, int user_regs)
2303 {
2304 	struct map_info *map = mtd->priv;
2305 	struct cfi_private *cfi = map->fldrv_priv;
2306 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2307 	struct flchip *chip;
2308 	struct cfi_intelext_otpinfo *otp;
2309 	u_long devsize, reg_prot_offset, data_offset;
2310 	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2311 	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2312 	int ret;
2313 
2314 	*retlen = 0;
2315 
2316 	/* Check that we actually have some OTP registers */
2317 	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2318 		return -ENODATA;
2319 
2320 	/* we need real chips here not virtual ones */
2321 	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2322 	chip_step = devsize >> cfi->chipshift;
2323 	chip_num = 0;
2324 
2325 	/* Some chips have OTP located in the _top_ partition only.
2326 	   For example: Intel 28F256L18T (T means top-parameter device) */
2327 	if (cfi->mfr == CFI_MFR_INTEL) {
2328 		switch (cfi->id) {
2329 		case 0x880b:
2330 		case 0x880c:
2331 		case 0x880d:
2332 			chip_num = chip_step - 1;
2333 		}
2334 	}
2335 
2336 	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2337 		chip = &cfi->chips[chip_num];
2338 		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2339 
2340 		/* first OTP region */
2341 		field = 0;
2342 		reg_prot_offset = extp->ProtRegAddr;
2343 		reg_fact_groups = 1;
2344 		reg_fact_size = 1 << extp->FactProtRegSize;
2345 		reg_user_groups = 1;
2346 		reg_user_size = 1 << extp->UserProtRegSize;
2347 
2348 		while (len > 0) {
2349 			/* flash geometry fixup */
2350 			data_offset = reg_prot_offset + 1;
2351 			data_offset *= cfi->interleave * cfi->device_type;
2352 			reg_prot_offset *= cfi->interleave * cfi->device_type;
2353 			reg_fact_size *= cfi->interleave;
2354 			reg_user_size *= cfi->interleave;
2355 
2356 			if (user_regs) {
2357 				groups = reg_user_groups;
2358 				groupsize = reg_user_size;
2359 				/* skip over factory reg area */
2360 				groupno = reg_fact_groups;
2361 				data_offset += reg_fact_groups * reg_fact_size;
2362 			} else {
2363 				groups = reg_fact_groups;
2364 				groupsize = reg_fact_size;
2365 				groupno = 0;
2366 			}
2367 
2368 			while (len > 0 && groups > 0) {
2369 				if (!action) {
2370 					/*
2371 					 * Special case: if action is NULL
2372 					 * we fill buf with otp_info records.
2373 					 */
2374 					struct otp_info *otpinfo;
2375 					map_word lockword;
2376 					len -= sizeof(struct otp_info);
2377 					if (len <= 0)
2378 						return -ENOSPC;
2379 					ret = do_otp_read(map, chip,
2380 							  reg_prot_offset,
2381 							  (u_char *)&lockword,
2382 							  map_bankwidth(map),
2383 							  0, 0,  0);
2384 					if (ret)
2385 						return ret;
2386 					otpinfo = (struct otp_info *)buf;
2387 					otpinfo->start = from;
2388 					otpinfo->length = groupsize;
2389 					otpinfo->locked =
2390 					   !map_word_bitsset(map, lockword,
2391 							     CMD(1 << groupno));
2392 					from += groupsize;
2393 					buf += sizeof(*otpinfo);
2394 					*retlen += sizeof(*otpinfo);
2395 				} else if (from >= groupsize) {
2396 					from -= groupsize;
2397 					data_offset += groupsize;
2398 				} else {
2399 					int size = groupsize;
2400 					data_offset += from;
2401 					size -= from;
2402 					from = 0;
2403 					if (size > len)
2404 						size = len;
2405 					ret = action(map, chip, data_offset,
2406 						     buf, size, reg_prot_offset,
2407 						     groupno, groupsize);
2408 					if (ret < 0)
2409 						return ret;
2410 					buf += size;
2411 					len -= size;
2412 					*retlen += size;
2413 					data_offset += size;
2414 				}
2415 				groupno++;
2416 				groups--;
2417 			}
2418 
2419 			/* next OTP region */
2420 			if (++field == extp->NumProtectionFields)
2421 				break;
2422 			reg_prot_offset = otp->ProtRegAddr;
2423 			reg_fact_groups = otp->FactGroups;
2424 			reg_fact_size = 1 << otp->FactProtRegSize;
2425 			reg_user_groups = otp->UserGroups;
2426 			reg_user_size = 1 << otp->UserProtRegSize;
2427 			otp++;
2428 		}
2429 	}
2430 
2431 	return 0;
2432 }
2433 
2434 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2435 					   size_t len, size_t *retlen,
2436 					    u_char *buf)
2437 {
2438 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2439 				     buf, do_otp_read, 0);
2440 }
2441 
2442 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2443 					   size_t len, size_t *retlen,
2444 					    u_char *buf)
2445 {
2446 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2447 				     buf, do_otp_read, 1);
2448 }
2449 
2450 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2451 					    size_t len, size_t *retlen,
2452 					    const u_char *buf)
2453 {
2454 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2455 				     (u_char *)buf, do_otp_write, 1);
2456 }
2457 
2458 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2459 					   loff_t from, size_t len)
2460 {
2461 	size_t retlen;
2462 	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2463 				     NULL, do_otp_lock, 1);
2464 }
2465 
2466 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
2467 					   size_t *retlen, struct otp_info *buf)
2468 
2469 {
2470 	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2471 				     NULL, 0);
2472 }
2473 
2474 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
2475 					   size_t *retlen, struct otp_info *buf)
2476 {
2477 	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2478 				     NULL, 1);
2479 }
2480 
2481 #endif
2482 
2483 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2484 {
2485 	struct mtd_erase_region_info *region;
2486 	int block, status, i;
2487 	unsigned long adr;
2488 	size_t len;
2489 
2490 	for (i = 0; i < mtd->numeraseregions; i++) {
2491 		region = &mtd->eraseregions[i];
2492 		if (!region->lockmap)
2493 			continue;
2494 
2495 		for (block = 0; block < region->numblocks; block++){
2496 			len = region->erasesize;
2497 			adr = region->offset + block * len;
2498 
2499 			status = cfi_varsize_frob(mtd,
2500 					do_getlockstatus_oneblock, adr, len, NULL);
2501 			if (status)
2502 				set_bit(block, region->lockmap);
2503 			else
2504 				clear_bit(block, region->lockmap);
2505 		}
2506 	}
2507 }
2508 
2509 static int cfi_intelext_suspend(struct mtd_info *mtd)
2510 {
2511 	struct map_info *map = mtd->priv;
2512 	struct cfi_private *cfi = map->fldrv_priv;
2513 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2514 	int i;
2515 	struct flchip *chip;
2516 	int ret = 0;
2517 
2518 	if ((mtd->flags & MTD_POWERUP_LOCK)
2519 	    && extp && (extp->FeatureSupport & (1 << 5)))
2520 		cfi_intelext_save_locks(mtd);
2521 
2522 	for (i=0; !ret && i<cfi->numchips; i++) {
2523 		chip = &cfi->chips[i];
2524 
2525 		mutex_lock(&chip->mutex);
2526 
2527 		switch (chip->state) {
2528 		case FL_READY:
2529 		case FL_STATUS:
2530 		case FL_CFI_QUERY:
2531 		case FL_JEDEC_QUERY:
2532 			if (chip->oldstate == FL_READY) {
2533 				/* place the chip in a known state before suspend */
2534 				map_write(map, CMD(0xFF), cfi->chips[i].start);
2535 				chip->oldstate = chip->state;
2536 				chip->state = FL_PM_SUSPENDED;
2537 				/* No need to wake_up() on this state change -
2538 				 * as the whole point is that nobody can do anything
2539 				 * with the chip now anyway.
2540 				 */
2541 			} else {
2542 				/* There seems to be an operation pending. We must wait for it. */
2543 				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2544 				ret = -EAGAIN;
2545 			}
2546 			break;
2547 		default:
2548 			/* Should we actually wait? Once upon a time these routines weren't
2549 			   allowed to. Or should we return -EAGAIN, because the upper layers
2550 			   ought to have already shut down anything which was using the device
2551 			   anyway? The latter for now. */
2552 			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2553 			ret = -EAGAIN;
2554 			break;
2555 		case FL_PM_SUSPENDED:
2556 			break;
2557 		}
2558 		mutex_unlock(&chip->mutex);
2559 	}
2560 
2561 	/* Unlock the chips again */
2562 
2563 	if (ret) {
2564 		for (i--; i >=0; i--) {
2565 			chip = &cfi->chips[i];
2566 
2567 			mutex_lock(&chip->mutex);
2568 
2569 			if (chip->state == FL_PM_SUSPENDED) {
2570 				/* No need to force it into a known state here,
2571 				   because we're returning failure, and it didn't
2572 				   get power cycled */
2573 				chip->state = chip->oldstate;
2574 				chip->oldstate = FL_READY;
2575 				wake_up(&chip->wq);
2576 			}
2577 			mutex_unlock(&chip->mutex);
2578 		}
2579 	}
2580 
2581 	return ret;
2582 }
2583 
2584 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2585 {
2586 	struct mtd_erase_region_info *region;
2587 	int block, i;
2588 	unsigned long adr;
2589 	size_t len;
2590 
2591 	for (i = 0; i < mtd->numeraseregions; i++) {
2592 		region = &mtd->eraseregions[i];
2593 		if (!region->lockmap)
2594 			continue;
2595 
2596 		for_each_clear_bit(block, region->lockmap, region->numblocks) {
2597 			len = region->erasesize;
2598 			adr = region->offset + block * len;
2599 			cfi_intelext_unlock(mtd, adr, len);
2600 		}
2601 	}
2602 }
2603 
2604 static void cfi_intelext_resume(struct mtd_info *mtd)
2605 {
2606 	struct map_info *map = mtd->priv;
2607 	struct cfi_private *cfi = map->fldrv_priv;
2608 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2609 	int i;
2610 	struct flchip *chip;
2611 
2612 	for (i=0; i<cfi->numchips; i++) {
2613 
2614 		chip = &cfi->chips[i];
2615 
2616 		mutex_lock(&chip->mutex);
2617 
2618 		/* Go to known state. Chip may have been power cycled */
2619 		if (chip->state == FL_PM_SUSPENDED) {
2620 			/* Refresh LH28F640BF Partition Config. Register */
2621 			fixup_LH28F640BF(mtd);
2622 			map_write(map, CMD(0xFF), cfi->chips[i].start);
2623 			chip->oldstate = chip->state = FL_READY;
2624 			wake_up(&chip->wq);
2625 		}
2626 
2627 		mutex_unlock(&chip->mutex);
2628 	}
2629 
2630 	if ((mtd->flags & MTD_POWERUP_LOCK)
2631 	    && extp && (extp->FeatureSupport & (1 << 5)))
2632 		cfi_intelext_restore_locks(mtd);
2633 }
2634 
2635 static int cfi_intelext_reset(struct mtd_info *mtd)
2636 {
2637 	struct map_info *map = mtd->priv;
2638 	struct cfi_private *cfi = map->fldrv_priv;
2639 	int i, ret;
2640 
2641 	for (i=0; i < cfi->numchips; i++) {
2642 		struct flchip *chip = &cfi->chips[i];
2643 
2644 		/* force the completion of any ongoing operation
2645 		   and switch to array mode so any bootloader in
2646 		   flash is accessible for soft reboot. */
2647 		mutex_lock(&chip->mutex);
2648 		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2649 		if (!ret) {
2650 			map_write(map, CMD(0xff), chip->start);
2651 			chip->state = FL_SHUTDOWN;
2652 			put_chip(map, chip, chip->start);
2653 		}
2654 		mutex_unlock(&chip->mutex);
2655 	}
2656 
2657 	return 0;
2658 }
2659 
2660 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2661 			       void *v)
2662 {
2663 	struct mtd_info *mtd;
2664 
2665 	mtd = container_of(nb, struct mtd_info, reboot_notifier);
2666 	cfi_intelext_reset(mtd);
2667 	return NOTIFY_DONE;
2668 }
2669 
2670 static void cfi_intelext_destroy(struct mtd_info *mtd)
2671 {
2672 	struct map_info *map = mtd->priv;
2673 	struct cfi_private *cfi = map->fldrv_priv;
2674 	struct mtd_erase_region_info *region;
2675 	int i;
2676 	cfi_intelext_reset(mtd);
2677 	unregister_reboot_notifier(&mtd->reboot_notifier);
2678 	kfree(cfi->cmdset_priv);
2679 	kfree(cfi->cfiq);
2680 	kfree(cfi->chips[0].priv);
2681 	kfree(cfi);
2682 	for (i = 0; i < mtd->numeraseregions; i++) {
2683 		region = &mtd->eraseregions[i];
2684 		kfree(region->lockmap);
2685 	}
2686 	kfree(mtd->eraseregions);
2687 }
2688 
2689 MODULE_LICENSE("GPL");
2690 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2691 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2692 MODULE_ALIAS("cfi_cmdset_0003");
2693 MODULE_ALIAS("cfi_cmdset_0200");
2694