/*
 * Common Flash Interface support:
 *   ST Advanced Architecture Command Set (ID 0x0020)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * 10/10/2000	Nicolas Pitre <nico@fluxnic.net>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 06/21/2002	Joern Engel <joern@wh.fh-wedel.de> and others
 *	- modified Intel Command Set 0x0001 to support ST Advanced Architecture
 *	  (command set 0x0020)
 *	- added a writev function
 * 07/13/2005	Joern Engel <joern@wh.fh-wedel.de>
 * 	- Plugged memory leak in cfi_staa_writev().
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>


static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen);
static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_staa_sync (struct mtd_info *);
static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_staa_suspend (struct mtd_info *);
static void cfi_staa_resume (struct mtd_info *);

static void cfi_staa_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0020(struct map_info *, int);

static struct mtd_info *cfi_staa_setup (struct map_info *);

static struct mtd_chip_driver cfi_staa_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_staa_destroy,
	.name		= "cfi_cmdset_0020",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:         %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:      %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:    %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:       %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:    %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:     %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:   %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	for (i=9; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	/* VccOptimal/VppOptimal are single BCD bytes (e.g. 0x33 == 3.3 V),
	   so the integer part is the high nibble, not the high byte */
	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int i;

	if (cfi->cfi_mode) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
		if (!extp)
			return NULL;

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
			printk(KERN_ERR "  Unknown ST Microelectronics"
			       " Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);
			kfree(extp);
			return NULL;
		}

		/* Do some byteswapping if necessary */
		extp->FeatureSupport = cfi32_to_cpu(map, extp->FeatureSupport);
		extp->BlkStatusRegMask = cfi32_to_cpu(map,
						extp->BlkStatusRegMask);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;
	}

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 128;
		cfi->chips[i].buffer_write_time = 128;
		cfi->chips[i].erase_time = 1024;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	return cfi_staa_setup(map);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0020);

static struct mtd_info *cfi_staa_setup(struct map_info *map)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	if (!mtd) {
		kfree(cfi->cmdset_priv);
		return NULL;
	}

	mtd->priv = map;
	mtd->type = MTD_NORFLASH;
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
					  sizeof(struct mtd_erase_region_info),
					  GFP_KERNEL);
	if (!mtd->eraseregions) {
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
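		/* CFI Erase Block Region Info word: bits 31..16 give the
		 * block size in 256-byte units, bits 15..0 the number of
		 * blocks minus one.  (info >> 8) & ~0xff is simply
		 * (info >> 16) * 256, scaled here by the interleave. */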
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		kfree(mtd->eraseregions);
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i<mtd->numeraseregions; i++) {
		printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n",
		       i, (unsigned long long)mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

	/* Select the correct geometry setup */
	mtd->_erase = cfi_staa_erase_varsize;
	mtd->_read = cfi_staa_read;
	mtd->_write = cfi_staa_write_buffers;
	mtd->_writev = cfi_staa_writev;
	mtd->_sync = cfi_staa_sync;
	mtd->_lock = cfi_staa_lock;
	mtd->_unlock = cfi_staa_unlock;
	mtd->_suspend = cfi_staa_suspend;
	mtd->_resume = cfi_staa_resume;
	mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
	mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	map->fldrv = &cfi_staa_chipdrv;
	__module_get(THIS_MODULE);
	mtd->name = map->name;
	return mtd;
}


static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	map_word status, status_OK;
	unsigned long timeo;
	DECLARE_WAITQUEUE(wait, current);
	int suspended = 0;
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);
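	/* status_OK is SR.7, the Write State Machine "ready" bit,
	 * replicated across all interleaved chips by CMD(). */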

	timeo = jiffies + HZ;
 retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * If it's in FL_ERASING state, suspend it and make it talk now.
	 */
	switch (chip->state) {
	case FL_ERASING:
		if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
			goto sleep; /* We don't support erase suspend */

		map_write(map, CMD(0xb0), cmd_addr);
		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), cmd_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		//		printk("Erase suspending at 0x%lx\n", cmd_addr);
		for (;;) {
			status = map_read(map, cmd_addr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh */
				map_write(map, CMD(0xd0), cmd_addr);
				/* make sure we're in 'read status' mode */
				map_write(map, CMD(0x70), cmd_addr);
				chip->state = FL_ERASING;
				wake_up(&chip->wq);
				mutex_unlock(&chip->mutex);
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
		}

		suspended = 1;
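		/* 0xFF (Read Array) puts the suspended chip into read mode
		 * so the map_copy_from() below returns real data. */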
		map_write(map, CMD(0xff), cmd_addr);
		chip->state = FL_READY;
		break;

#if 0
	case FL_WRITING:
		/* Not quite yet */
#endif

	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_addr);
		chip->state = FL_STATUS;
		/* Fall through */

	case FL_STATUS:
		status = map_read(map, cmd_addr);
		if (map_word_andequal(map, status, status_OK, status_OK)) {
			map_write(map, CMD(0xff), cmd_addr);
			chip->state = FL_READY;
			break;
		}

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
	sleep:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	map_copy_from(map, buf, adr, len);

	if (suspended) {
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
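		/* 0xD0 resumes the suspended erase; 0x70 keeps every chip
		 * in Read Status mode while it runs. */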
		map_write(map, CMD(0xd0), cmd_addr);
		map_write(map, CMD(0x70), cmd_addr);
	}

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int do_write_buffer(struct map_info *map, struct flchip *chip,
				  unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	DECLARE_WAITQUEUE(wait, current);
	int wbufsize, z;

	/* M58LW064A requires bus alignment for buffer writes -- saw */
	if (adr & (map_bankwidth(map)-1))
		return -EINVAL;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:

#ifdef DEBUG_CFI_FEATURES
	printk("%s: chip->state[%d]\n", __func__, chip->state);
#endif
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * Later, we can actually think about interrupting it
	 * if it's in FL_ERASING state.
	 * Not just yet, though.
	 */
	switch (chip->state) {
	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 1 status[%lx]\n", __func__, map_read(map, cmd_adr).x[0]);
#endif
		/* Fall through */

	case FL_STATUS:
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;
		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
			       status.x[0], map_read(map, cmd_adr).x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
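	/* Buffer-write protocol: 0xE8 requests the chip's write buffer;
	 * the chip answers through the status register, after which we
	 * send a word count (minus one), the data, and a 0xD0 confirm. */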
	map_write(map, CMD(0xe8), cmd_adr);
	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);

		if (++z > 100) {
			/* Argh. Not ready for write to buffer */
			DISABLE_VPP(map);
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
			return -EIO;
		}
	}

	/* Write length of data to come */
	map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr);
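	/* e.g. a 32-byte transfer on a 4-byte-wide bank is encoded as
	 * CMD(7): eight bus words, minus one */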

	/* Write data */
	for (z = 0; z < len;
	     z += map_bankwidth(map), buf += map_bankwidth(map)) {
		map_word d;
		d = map_word_load(map, buf);
		map_write(map, d, adr+z);
	}
	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	mutex_unlock(&chip->mutex);
	cfi_udelay(chip->buffer_write_time);
	mutex_lock(&chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			/* clear status */
			map_write(map, CMD(0x50), cmd_adr);
			/* put back into read status register mode */
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		z++;
		mutex_lock(&chip->mutex);
	}
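	/* Adapt the programming delay: if the write completed without any
	 * extra polling, shorten the delay (never below 1); if we had to
	 * poll more than once, lengthen it. */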
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	DISABLE_VPP(map);
	chip->state = FL_STATUS;

	/* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 2 status[%lx]\n", __func__, status.x[0]);
#endif
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		wake_up(&chip->wq);
		mutex_unlock(&chip->mutex);
		return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
	}
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}

static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret;
	int chipnum;
	unsigned long ofs;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

#ifdef DEBUG_CFI_FEATURES
	printk("%s: map_bankwidth(map)[%x]\n", __func__, map_bankwidth(map));
	printk("%s: chipnum[%x] wbufsize[%x]\n", __func__, chipnum, wbufsize);
	printk("%s: ofs[%lx] len[%zx]\n", __func__, ofs, len);
#endif

	/* Write buffer is worth it only if more than one word to write... */
	while (len > 0) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));
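		/* e.g. wbufsize 32 and ofs 40: size = 32 - 8 = 24, so this
		 * chunk stops exactly at the next write-block boundary */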

		if (size > len)
			size = len;

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	return 0;
}

/*
 * Writev for ECC-Flashes is a little more complicated. We need to maintain
 * a small buffer for this.
 * XXX: If the buffer size is not a power of 2, this will break
 */
#define ECCBUF_SIZE (mtd->writesize)
#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
#define ECCBUF_MOD(x) ((x) &  (ECCBUF_SIZE - 1))
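/* Worked example: with mtd->writesize == 8, ECCBUF_DIV(13) == 8 and
 * ECCBUF_MOD(13) == 5, i.e. 13 bytes split into one aligned 8-byte
 * chunk plus a 5-byte tail that is buffered for the next round. */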
static int
cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t	 totlen = 0, thislen;
	int	 ret = 0;
	size_t	 buflen = 0;
	char *buffer;

	if (!ECCBUF_SIZE) {
		/* We should fall back to a general writev implementation.
		 * Until that is written, just break.
		 */
		return -EIO;
	}
	buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	for (i=0; i<count; i++) {
		size_t elem_len = vecs[i].iov_len;
		void *elem_base = vecs[i].iov_base;
		if (!elem_len) /* FIXME: Might be unnecessary. Check that */
			continue;
		if (buflen) { /* cut off head */
			if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
				memcpy(buffer+buflen, elem_base, elem_len);
				buflen += elem_len;
				continue;
			}
			memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
			ret = mtd_write(mtd, to, ECCBUF_SIZE, &thislen,
					buffer);
			totlen += thislen;
			if (ret || thislen != ECCBUF_SIZE)
				goto write_error;
			elem_len -= thislen-buflen;
			elem_base += thislen-buflen;
			to += ECCBUF_SIZE;
		}
		if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
			ret = mtd_write(mtd, to, ECCBUF_DIV(elem_len),
					&thislen, elem_base);
			totlen += thislen;
			if (ret || thislen != ECCBUF_DIV(elem_len))
				goto write_error;
			to += thislen;
		}
		buflen = ECCBUF_MOD(elem_len); /* cut off tail */
		if (buflen) {
			memset(buffer, 0xff, ECCBUF_SIZE);
			/* The tail starts right after the aligned portion;
			 * don't use thislen here, it is stale (or even
			 * uninitialized) when no aligned write happened. */
			memcpy(buffer, elem_base + ECCBUF_DIV(elem_len), buflen);
		}
	}
	if (buflen) { /* flush last page, even if not full */
		/* This is sometimes intended behaviour, really */
		ret = mtd_write(mtd, to, buflen, &thislen, buffer);
		totlen += thislen;
		if (ret || thislen != buflen)
			goto write_error;
	}
write_error:
	if (retlen)
		*retlen = totlen;
	kfree(buffer);
	return ret;
}


static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		/* Fall through */

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
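	/* 0x20 is Block Erase setup, 0xD0 the confirm; the Write State
	 * Machine then runs the erase autonomously. */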
	chip->state = FL_ERASING;

	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ*20); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	DISABLE_VPP(map);
	ret = 0;

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);
	/* check for errors: lock bit, VPP low, erase or program failure */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned char chipstatus = status.x[0];
		if (!map_word_equal(map, status, CMD(chipstatus))) {
			int i, w;
			for (w=0; w<map_words(map); w++) {
				for (i = 0; i<cfi_interleave(cfi); i++) {
					/* shift each interleaved chip's
					   status down before merging it in */
					chipstatus |= status.x[w] >> (cfi->device_type * 8 * i);
				}
			}
			printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
			       status.x[0], chipstatus);
		}
		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				chip->state = FL_STATUS;
				mutex_unlock(&chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
			ret = -EIO;
		}
	}

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_staa_erase_varsize(struct mtd_info *mtd,
				  struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
		i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (instr->addr & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((instr->addr + instr->len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;

	i = first;

	while (len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

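		/* Advance to the next erase region once we've erased the
		 * last block of the current one (addresses are compared
		 * modulo the per-chip address space). */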
		if (adr % (1 << cfi->chipshift) == (((unsigned long)regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) % (1 << cfi->chipshift)))
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	return 0;
}

static void cfi_staa_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* Fall through */
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}

static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		/* Fall through */

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0x01), adr);
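	/* 0x60 is the Set/Clear Block Lock setup command; 0x01 sets the
	 * lock bit for this block. */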
	chip->state = FL_LOCKING;

	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	if (ofs & (mtd->erasesize - 1))
		return -EINVAL;

	if (len & (mtd->erasesize - 1))
		return -EINVAL;

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	while (len) {

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		if (ret)
			return ret;

		adr += mtd->erasesize;
		len -= mtd->erasesize;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}
	return 0;
}

static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		/* Fall through */

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0xD0), adr);
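	/* 0x60 is the Set/Clear Block Lock setup command; 0xD0 clears the
	 * lock bit. */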
	chip->state = FL_UNLOCKING;

	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

#ifdef DEBUG_LOCK_BITS
	{
		unsigned long temp_adr = adr;
		unsigned long temp_len = len;

		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		while (temp_len) {
			printk("before unlock %lx: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
			temp_adr += mtd->erasesize;
			temp_len -= mtd->erasesize;
		}
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
	}
#endif

	ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
	cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

	return ret;
}

static int cfi_staa_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* Fall through */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}

static void cfi_staa_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), 0);
			chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		mutex_unlock(&chip->mutex);
	}
}

static void cfi_staa_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	kfree(cfi);
}

MODULE_LICENSE("GPL");