xref: /openbmc/linux/drivers/mtd/chips/cfi_util.c (revision a09d2831)
/*
 * Common Flash Interface support:
 *   Generic utility functions not dependent on command set
 *
 * Copyright (C) 2002 Red Hat
 * Copyright (C) 2003 STMicroelectronics Limited
 *
 * This code is covered by the GPL.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/compatmac.h>

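/**
 * cfi_qry_present - check for the CFI "QRY" identification string
 * @map:  the map the chip sits behind
 * @base: base address of the chip within the map
 * @cfi:  per-map CFI private data; interleave and device type are used
 *        to scale the query offsets
 *
 * Only meaningful once the chip has been switched into query mode (see
 * cfi_qry_mode_on() below).  Reads the words at offsets 0x10-0x12
 * (scaled by interleave * device_type) and compares them against
 * 'Q', 'R' and 'Y'.
 *
 * Returns 1 if the "QRY" string is found, 0 otherwise.
 */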
int __xipram cfi_qry_present(struct map_info *map, __u32 base,
			     struct cfi_private *cfi)
{
	int osf = cfi->interleave * cfi->device_type;	/* scale factor */
	map_word val[3];
	map_word qry[3];

	qry[0] = cfi_build_cmd('Q', map, cfi);
	qry[1] = cfi_build_cmd('R', map, cfi);
	qry[2] = cfi_build_cmd('Y', map, cfi);

	val[0] = map_read(map, base + osf*0x10);
	val[1] = map_read(map, base + osf*0x11);
	val[2] = map_read(map, base + osf*0x12);

	if (!map_word_equal(map, qry[0], val[0]))
		return 0;

	if (!map_word_equal(map, qry[1], val[1]))
		return 0;

	if (!map_word_equal(map, qry[2], val[2]))
		return 0;

	return 1;	/* "QRY" found */
}
EXPORT_SYMBOL_GPL(cfi_qry_present);

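/**
 * cfi_qry_mode_on - try to switch a chip into CFI query mode
 * @base: base address of the chip within the map
 * @map:  the map the chip sits behind
 * @cfi:  per-map CFI private data
 *
 * Issues the standard CFI query sequence first and then, if "QRY" does
 * not appear, falls back to a few known vendor-specific sequences (old
 * Intel parts, ST M29DW, old SST 39VF160x/39VF320x).
 *
 * Returns 1 if the chip answered with "QRY", 0 otherwise.  Callers are
 * expected to pair this with cfi_qry_mode_off(); see cfi_read_pri()
 * below for an in-file example, or this minimal hypothetical sketch:
 *
 *	if (cfi_qry_mode_on(base, map, cfi)) {
 *		... read query data with cfi_read_query() ...
 *		cfi_qry_mode_off(base, map, cfi);
 *	}
 */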
int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
			     struct cfi_private *cfi)
{
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* QRY not found: we are probably dealing with some odd CFI chips */
	/* Some revisions of some old Intel chips? */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* ST M29DW chips */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* some old SST chips, e.g. 39VF160x/39VF320x */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* QRY not found */
	return 0;
}
EXPORT_SYMBOL_GPL(cfi_qry_mode_on);

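/**
 * cfi_qry_mode_off - switch a chip back from query mode to read mode
 * @base: base address of the chip within the map
 * @map:  the map the chip sits behind
 * @cfi:  per-map CFI private data
 *
 * Sends the generic reset commands (0xF0 and 0xFF); ST M29W128G parts
 * get one further 0xF0 reset, which they need to leave query mode
 * reliably.
 */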
void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
			       struct cfi_private *cfi)
{
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
	/* M29W128G flashes require an additional reset command
	   when exiting QRY mode */
	if ((cfi->mfr == CFI_MFR_ST) && (cfi->id == 0x227E || cfi->id == 0x7E))
		cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
}
EXPORT_SYMBOL_GPL(cfi_qry_mode_off);

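/**
 * cfi_read_pri - read a vendor-specific extended query table
 * @map:  the map the chip sits behind
 * @adr:  offset of the extended query table as reported by the chip
 * @size: number of bytes to read
 * @name: human-readable name used in the probe log message
 *
 * Switches the chip into query mode, copies @size bytes starting at
 * @adr into a freshly kmalloc()ed buffer, and drops back to read mode.
 * Returns the buffer, or NULL if @adr is zero or the allocation fails.
 * The caller owns the returned memory.
 *
 * A hypothetical caller in a command-set driver might look roughly like:
 *
 *	struct cfi_pri_amdstd *extp;
 *
 *	extp = (struct cfi_pri_amdstd *)cfi_read_pri(map, adr,
 *						     sizeof(*extp), "Amd/Fujitsu");
 *	if (!extp)
 *		return NULL;
 */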
struct cfi_extquery *
__xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name)
{
	struct cfi_private *cfi = map->fldrv_priv;
	__u32 base = 0; // cfi->chips[0].start;
	int ofs_factor = cfi->interleave * cfi->device_type;
	int i;
	struct cfi_extquery *extp = NULL;

	printk(" %s Extended Query Table at 0x%4.4X\n", name, adr);
	if (!adr)
		goto out;

	extp = kmalloc(size, GFP_KERNEL);
	if (!extp) {
		printk(KERN_ERR "Failed to allocate memory\n");
		goto out;
	}

#ifdef CONFIG_MTD_XIP
	local_irq_disable();
#endif

	/* Switch it into Query Mode */
	cfi_qry_mode_on(base, map, cfi);
	/* Read in the Extended Query Table */
	for (i = 0; i < size; i++) {
		((unsigned char *)extp)[i] =
			cfi_read_query(map, base + ((adr + i) * ofs_factor));
	}

	/* Make sure it returns to read mode */
	cfi_qry_mode_off(base, map, cfi);

#ifdef CONFIG_MTD_XIP
	(void) map_read(map, base);
	xip_iprefetch();
	local_irq_enable();
#endif

 out:	return extp;
}

EXPORT_SYMBOL(cfi_read_pri);

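/**
 * cfi_fixup - apply chip-specific fixups from a table
 * @mtd:    the MTD device the fixups apply to
 * @fixups: table of fixups, terminated by an entry with a NULL ->fixup
 *
 * Walks @fixups and calls every entry whose manufacturer and device ID
 * match the probed chip (CFI_MFR_ANY / CFI_ID_ANY act as wildcards),
 * passing the entry's ->param to the fixup function.
 *
 * A hypothetical fixup table in a command-set driver could look like:
 *
 *	static struct cfi_fixup my_fixup_table[] = {
 *		{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
 *		{ 0, 0, NULL, NULL }
 *	};
 *	...
 *	cfi_fixup(mtd, my_fixup_table);
 */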
void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_fixup *f;

	for (f = fixups; f->fixup; f++) {
		if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
		    ((f->id  == CFI_ID_ANY)  || (f->id  == cfi->id))) {
			f->fixup(mtd, f->param);
		}
	}
}

EXPORT_SYMBOL(cfi_fixup);

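/**
 * cfi_varsize_frob - apply an operation block-by-block across erase regions
 * @mtd:   the MTD device to operate on
 * @frob:  callback invoked once per erase block
 * @ofs:   start offset of the range; must be erase-block aligned
 * @len:   length of the range; must end on an erase-block boundary
 * @thunk: opaque cookie passed through to @frob
 *
 * Validates that [@ofs, @ofs + @len) lies inside the device and that
 * both ends are aligned to the erase size in effect at those addresses,
 * then walks the range, calling @frob for each block with the right
 * chip, per-chip address and block size.  Stops and returns the first
 * non-zero value returned by @frob; returns 0 on success and -EINVAL
 * on a bad range or alignment.
 *
 * A hypothetical erase path built on top of it might read:
 *
 *	static int my_erase_varsize(struct mtd_info *mtd,
 *				    struct erase_info *instr)
 *	{
 *		return cfi_varsize_frob(mtd, do_erase_oneblock,
 *					instr->addr, instr->len, NULL);
 *	}
 */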
int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
		     loff_t ofs, size_t len, void *thunk)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (ofs > mtd->size)
		return -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which end before the start of the
	   requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while (i < mtd->numeraseregions && ofs >= regions[i].offset)
		i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check that the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (ofs & (regions[i].erasesize - 1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i < mtd->numeraseregions && (ofs + len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((ofs + len) & (regions[i].erasesize - 1))
		return -EINVAL;

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	i = first;

	while (len) {
		int size = regions[i].erasesize;

		ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);

		if (ret)
			return ret;

		adr += size;
		ofs += size;
		len -= size;

		if (ofs == regions[i].offset + size * regions[i].numblocks)
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	return 0;
}

EXPORT_SYMBOL(cfi_varsize_frob);

MODULE_LICENSE("GPL");