/*
 * Common Flash Interface support:
 *   Generic utility functions not dependent on command set
 *
 * Copyright (C) 2002 Red Hat
 * Copyright (C) 2003 STMicroelectronics Limited
 *
 * This code is covered by the GPL.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>

void cfi_udelay(int us)
{
	if (us >= 1000) {
		msleep(DIV_ROUND_UP(us, 1000));
	} else {
		udelay(us);
		cond_resched();
	}
}
EXPORT_SYMBOL(cfi_udelay);
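
/*
 * Illustrative arithmetic (not part of the original source):
 * cfi_udelay(1500) sleeps for DIV_ROUND_UP(1500, 1000) = 2 ms via
 * msleep(), while cfi_udelay(100) busy-waits with udelay() and then
 * offers the CPU back via cond_resched(), so short flash polling
 * delays stay precise without hogging the processor for long ones.
 */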

/*
 * Returns the command address according to the given geometry.
 */
uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
				struct map_info *map, struct cfi_private *cfi)
{
	unsigned bankwidth = map_bankwidth(map);
	unsigned interleave = cfi_interleave(cfi);
	unsigned type = cfi->device_type;
	uint32_t addr;

	addr = (cmd_ofs * type) * interleave;

	/* Modify the unlock address if we are in compatibility mode.
	 * For 16-bit devices on 8-bit buses and 32-bit devices on
	 * 16-bit buses, set the low bit of the alternating bit
	 * sequence of the address.
	 */
	if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa))
		addr |= (type >> 1) * interleave;

	return addr;
}
EXPORT_SYMBOL(cfi_build_cmd_addr);
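
/*
 * Worked example (illustrative, not part of the original source): for
 * an x16 device (device_type 2) in byte mode on an 8-bit bus
 * (bankwidth 1, interleave 1):
 *
 *	cfi_build_cmd_addr(0x555, ...) -> 0x555 * 2 * 1 = 0xAAA
 *		(0x55 != 0xaa, so no fixup)
 *	cfi_build_cmd_addr(0x2AA, ...) -> 0x2AA * 2 * 1 = 0x554, then
 *		the compatibility fixup ORs in (2 >> 1) * 1 = 1,
 *		giving 0x555
 *
 * i.e. the familiar 0xAAA/0x555 unlock address pair such parts expect.
 */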

/*
 * Transforms the CFI command for the given geometry (bus width & interleave).
 * It looks too long to be inline, but in the common case it should almost all
 * get optimised away.
 */
map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
{
	map_word val = { {0} };
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onecmd;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; /* i.e. normally 1 */
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	/* First, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	default: BUG();
	case 1:
		onecmd = cmd;
		break;
	case 2:
		onecmd = cpu_to_cfi16(map, cmd);
		break;
	case 4:
		onecmd = cpu_to_cfi32(map, cmd);
		break;
	}

	/* Now replicate it across the size of an unsigned long, or
	   just to the bus width as appropriate */
	switch (chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		onecmd |= (onecmd << (chip_mode * 32));
#endif
		/* fall through */
	case 4:
		onecmd |= (onecmd << (chip_mode * 16));
		/* fall through */
	case 2:
		onecmd |= (onecmd << (chip_mode * 8));
		/* fall through */
	case 1:
		break;
	}

	/* And finally, for the multi-word case, replicate it
	   in all words in the structure */
	for (i = 0; i < words_per_bus; i++) {
		val.x[i] = onecmd;
	}

	return val;
}
EXPORT_SYMBOL(cfi_build_cmd);
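
/*
 * Worked example (illustrative): two x16 chips (device_type 2)
 * interleaved on a 32-bit bus give chip_mode = 4 / 2 = 2 and
 * chips_per_word = 2. For cmd = 0xF0 with matching endianness,
 * onecmd starts as 0x00F0, the chips_per_word == 2 step does
 * onecmd |= onecmd << 16, and the returned map_word holds
 * 0x00F000F0: one copy of the command in each chip's lane.
 */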

unsigned long cfi_merge_status(map_word val, struct map_info *map,
					   struct cfi_private *cfi)
{
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onestat, res = 0;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; /* i.e. normally 1 */
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	onestat = val.x[0];
	/* OR all status words together */
	for (i = 1; i < words_per_bus; i++) {
		onestat |= val.x[i];
	}

	res = onestat;
	switch (chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		res |= (onestat >> (chip_mode * 32));
#endif
		/* fall through */
	case 4:
		res |= (onestat >> (chip_mode * 16));
		/* fall through */
	case 2:
		res |= (onestat >> (chip_mode * 8));
		/* fall through */
	case 1:
		break;
	}

	/* Last, convert the merged status back to CPU byte order,
	   according to chip mode and endianness... */
	switch (chip_mode) {
	case 1:
		break;
	case 2:
		res = cfi16_to_cpu(map, res);
		break;
	case 4:
		res = cfi32_to_cpu(map, res);
		break;
	default: BUG();
	}
	return res;
}
EXPORT_SYMBOL(cfi_merge_status);
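
/*
 * Worked example (illustrative): this is the read-side counterpart of
 * cfi_build_cmd(). With the two-chip x16 geometry above, a raw status
 * read of 0x00800090 (lanes 0x0080 and 0x0090) is folded by
 * res |= onestat >> 16 and truncated to a single lane, yielding
 * 0x0090, so a status bit raised by either chip survives into the
 * merged result.
 */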

/*
 * Sends a CFI command to a bank of flash for the given geometry.
 *
 * Returns the offset in flash where the command was written.
 * If prev_val is non-null, it will be set to the value at the command
 * address before the command was written.
 */
uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
				struct map_info *map, struct cfi_private *cfi,
				int type, map_word *prev_val)
{
	map_word val;
	uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
	val = cfi_build_cmd(cmd, map, cfi);

	if (prev_val)
		*prev_val = map_read(map, addr);

	map_write(map, val, addr);

	return addr - base;
}
EXPORT_SYMBOL(cfi_send_gen_cmd);
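
/*
 * Typical use (a sketch based on the command-set drivers, e.g.
 * cfi_cmdset_0002; chip->start is assumed to hold the chip base):
 *
 *	cfi_send_gen_cmd(0xAA, 0x555, chip->start, map, cfi,
 *			 cfi->device_type, NULL);
 *	cfi_send_gen_cmd(0x55, 0x2AA, chip->start, map, cfi,
 *			 cfi->device_type, NULL);
 *
 * cfi_build_cmd_addr() scales the 0x555/0x2AA unlock offsets for the
 * real geometry and cfi_build_cmd() replicates the command across all
 * interleaved chips before the single bus write.
 */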

/*
 * Check whether the chip at 'base' currently responds with the "QRY"
 * marker that identifies CFI Query mode.
 */
int __xipram cfi_qry_present(struct map_info *map, __u32 base,
			     struct cfi_private *cfi)
{
	int osf = cfi->interleave * cfi->device_type;	/* scale factor */
	map_word val[3];
	map_word qry[3];

	qry[0] = cfi_build_cmd('Q', map, cfi);
	qry[1] = cfi_build_cmd('R', map, cfi);
	qry[2] = cfi_build_cmd('Y', map, cfi);

	val[0] = map_read(map, base + osf*0x10);
	val[1] = map_read(map, base + osf*0x11);
	val[2] = map_read(map, base + osf*0x12);

	if (!map_word_equal(map, qry[0], val[0]))
		return 0;

	if (!map_word_equal(map, qry[1], val[1]))
		return 0;

	if (!map_word_equal(map, qry[2], val[2]))
		return 0;

	return 1;	/* "QRY" found */
}
EXPORT_SYMBOL_GPL(cfi_qry_present);

int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
			     struct cfi_private *cfi)
{
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* QRY not found; probably we are dealing with some odd CFI
	   chips, e.g. some revisions of old Intel parts */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* ST M29DW chips */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* some old SST chips, e.g. 39VF160x/39VF320x */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* SST 39VF640xB */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, 0x555, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, 0x2AA, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* QRY not found */
	return 0;
}
EXPORT_SYMBOL_GPL(cfi_qry_mode_on);

void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
			       struct cfi_private *cfi)
{
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
	/* M29W128G flashes require an additional reset command
	   when exiting QRY mode */
	if ((cfi->mfr == CFI_MFR_ST) && (cfi->id == 0x227E || cfi->id == 0x7E))
		cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
}
EXPORT_SYMBOL_GPL(cfi_qry_mode_off);
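
/*
 * cfi_qry_mode_on() and cfi_qry_mode_off() are intended to bracket any
 * access to the CFI Query data, as cfi_read_pri() below demonstrates:
 *
 *	if (cfi_qry_mode_on(base, map, cfi)) {
 *		... read query bytes with cfi_read_query() ...
 *		cfi_qry_mode_off(base, map, cfi);
 *	}
 */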

struct cfi_extquery *
__xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char *name)
{
	struct cfi_private *cfi = map->fldrv_priv;
	__u32 base = 0; /* cfi->chips[0].start */
	int ofs_factor = cfi->interleave * cfi->device_type;
	int i;
	struct cfi_extquery *extp = NULL;

	if (!adr)
		goto out;

	printk(KERN_INFO "%s Extended Query Table at 0x%4.4X\n", name, adr);

	extp = kmalloc(size, GFP_KERNEL);
	if (!extp)
		goto out;

#ifdef CONFIG_MTD_XIP
	local_irq_disable();
#endif

	/* Switch it into Query Mode */
	cfi_qry_mode_on(base, map, cfi);
	/* Read in the Extended Query Table */
	for (i = 0; i < size; i++) {
		((unsigned char *)extp)[i] =
			cfi_read_query(map, base + ((adr + i) * ofs_factor));
	}

	/* Make sure it returns to read mode */
	cfi_qry_mode_off(base, map, cfi);

#ifdef CONFIG_MTD_XIP
	(void) map_read(map, base);
	xip_iprefetch();
	local_irq_enable();
#endif

out:
	return extp;
}
EXPORT_SYMBOL(cfi_read_pri);
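
/*
 * Caller sketch (modelled on the command-set drivers; 'adr' is assumed
 * to be the extended-table address taken from the CFI query data):
 *
 *	struct cfi_pri_amdstd *extp;
 *
 *	extp = (struct cfi_pri_amdstd *)cfi_read_pri(map, adr,
 *						     sizeof(*extp),
 *						     "Amd/Fujitsu");
 *
 * The caller owns the returned buffer and must kfree() it when done.
 */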

void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_fixup *f;

	for (f = fixups; f->fixup; f++) {
		if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
		    ((f->id  == CFI_ID_ANY)  || (f->id  == cfi->id))) {
			f->fixup(mtd);
		}
	}
}
EXPORT_SYMBOL(cfi_fixup);
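
/*
 * A minimal fixup-table sketch (the names here are hypothetical): a
 * command-set driver declares one sentinel-terminated array and runs
 * it once at probe time:
 *
 *	static void fixup_quirky_part(struct mtd_info *mtd)
 *	{
 *		... adjust mtd/cfi fields for the quirky part ...
 *	}
 *
 *	static struct cfi_fixup example_fixups[] = {
 *		{ CFI_MFR_ST, 0x227E, fixup_quirky_part },
 *		{ CFI_MFR_ANY, CFI_ID_ANY, NULL },
 *	};
 *
 *	cfi_fixup(mtd, example_fixups);
 *
 * A CFI_MFR_ANY/CFI_ID_ANY entry matches every chip, and the NULL
 * fixup pointer terminates the walk.
 */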

int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
				     loff_t ofs, size_t len, void *thunk)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which end before the start of the
	   requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while (i < mtd->numeraseregions && ofs >= regions[i].offset)
		i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (ofs & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i < mtd->numeraseregions && (ofs + len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((ofs + len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	i = first;

	while (len) {
		int size = regions[i].erasesize;

		ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);

		if (ret)
			return ret;

		adr += size;
		ofs += size;
		len -= size;

		if (ofs == regions[i].offset + size * regions[i].numblocks)
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	return 0;
}
EXPORT_SYMBOL(cfi_varsize_frob);
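
/*
 * Usage sketch (callback name hypothetical; the real command-set
 * drivers pass their own): the frob callback runs once per erase
 * block, receiving the per-chip address and the block size in effect
 * for that erase region:
 *
 *	static int do_erase_oneblock(struct map_info *map,
 *				     struct flchip *chip,
 *				     unsigned long adr, int len,
 *				     void *thunk)
 *	{
 *		... issue the erase for one block and wait for it ...
 *		return 0;
 *	}
 *
 *	ret = cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
 *			       instr->len, NULL);
 */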

MODULE_LICENSE("GPL");