/*
 *  linux/fs/fat/cache.c
 *
 *  Written 1992,1993 by Werner Almesberger
 *
 *  Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
 *	of inode number.
 *  May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include "fat.h"

/* this must be > 0. */
#define FAT_MAX_CACHE	8

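/*
 * Each fat_cache entry caches one contiguous run of an inode's cluster
 * chain: file-relative cluster "fcluster" lives at on-disk cluster
 * "dcluster", and the following "nr_contig" clusters are contiguous on
 * disk.  A fat_cache_id is a private snapshot of such a run built while
 * walking the FAT; its "id" is checked against the inode's cache_valid_id
 * so that fat_cache_add() can reject snapshots taken before a cache
 * invalidation.
 */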
struct fat_cache {
	struct list_head cache_list;
	int nr_contig;	/* number of contiguous clusters */
	int fcluster;	/* cluster number in the file. */
	int dcluster;	/* cluster number on disk. */
};

struct fat_cache_id {
	unsigned int id;
	int nr_contig;
	int fcluster;
	int dcluster;
};

static inline int fat_max_cache(struct inode *inode)
{
	return FAT_MAX_CACHE;
}

static struct kmem_cache *fat_cache_cachep;

static void init_once(void *foo)
{
	struct fat_cache *cache = (struct fat_cache *)foo;

	INIT_LIST_HEAD(&cache->cache_list);
}

int __init fat_cache_init(void)
{
	fat_cache_cachep = kmem_cache_create("fat_cache",
				sizeof(struct fat_cache),
				0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
				init_once);
	if (fat_cache_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void fat_cache_destroy(void)
{
	kmem_cache_destroy(fat_cache_cachep);
}

static inline struct fat_cache *fat_cache_alloc(struct inode *inode)
{
	return kmem_cache_alloc(fat_cache_cachep, GFP_NOFS);
}

static inline void fat_cache_free(struct fat_cache *cache)
{
	BUG_ON(!list_empty(&cache->cache_list));
	kmem_cache_free(fat_cache_cachep, cache);
}

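/*
 * Move @cache to the head of the inode's LRU list so it becomes the most
 * recently used entry; when the cache is full, fat_cache_add() recycles
 * the entry at the tail.
 */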
static inline void fat_cache_update_lru(struct inode *inode,
					struct fat_cache *cache)
{
	if (MSDOS_I(inode)->cache_lru.next != &cache->cache_list)
		list_move(&cache->cache_list, &MSDOS_I(inode)->cache_lru);
}

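/*
 * Look up the cached run closest to (but not beyond) file cluster @fclus.
 * On a hit, *cached_fclus/*cached_dclus are set to the nearest cached
 * position, *cid is filled in for a later fat_cache_add(), and the offset
 * of that position from the cached run's start (capped at nr_contig) is
 * returned.  Returns -1 if nothing useful is cached.
 */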
static int fat_cache_lookup(struct inode *inode, int fclus,
			    struct fat_cache_id *cid,
			    int *cached_fclus, int *cached_dclus)
{
	static struct fat_cache nohit = { .fcluster = 0, };

	struct fat_cache *hit = &nohit, *p;
	int offset = -1;

	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
		/* Find the entry for "fclus", or the nearest one before it. */
		if (p->fcluster <= fclus && hit->fcluster < p->fcluster) {
			hit = p;
			if ((hit->fcluster + hit->nr_contig) < fclus) {
				offset = hit->nr_contig;
			} else {
				offset = fclus - hit->fcluster;
				break;
			}
		}
	}
	if (hit != &nohit) {
		fat_cache_update_lru(inode, hit);

		cid->id = MSDOS_I(inode)->cache_valid_id;
		cid->nr_contig = hit->nr_contig;
		cid->fcluster = hit->fcluster;
		cid->dcluster = hit->dcluster;
		*cached_fclus = cid->fcluster + offset;
		*cached_dclus = cid->dcluster + offset;
	}
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

	return offset;
}

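/*
 * Must be called with cache_lru_lock held.  If an entry starting at the
 * same file cluster as @new already exists, widen its run if @new covers
 * more clusters and return it; otherwise return NULL.
 */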
static struct fat_cache *fat_cache_merge(struct inode *inode,
					 struct fat_cache_id *new)
{
	struct fat_cache *p;

	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
		/* Find the same part as "new" in the cluster chain. */
		if (p->fcluster == new->fcluster) {
			BUG_ON(p->dcluster != new->dcluster);
			if (new->nr_contig > p->nr_contig)
				p->nr_contig = new->nr_contig;
			return p;
		}
	}
	return NULL;
}

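/*
 * Add the run described by @new to the inode's cache.  The spinlock is
 * dropped around the slab allocation (GFP_NOFS may sleep), so the merge
 * is retried afterwards in case another task added the same run in the
 * meantime; if the inode already holds fat_max_cache() entries, the
 * least recently used one is recycled instead of allocating.
 */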
static void fat_cache_add(struct inode *inode, struct fat_cache_id *new)
{
	struct fat_cache *cache, *tmp;

	if (new->fcluster == -1) /* dummy cache */
		return;

	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	if (new->id != FAT_CACHE_VALID &&
	    new->id != MSDOS_I(inode)->cache_valid_id)
		goto out;	/* this cache was invalidated */

	cache = fat_cache_merge(inode, new);
	if (cache == NULL) {
		if (MSDOS_I(inode)->nr_caches < fat_max_cache(inode)) {
			MSDOS_I(inode)->nr_caches++;
			spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

			tmp = fat_cache_alloc(inode);
			if (!tmp) {
				spin_lock(&MSDOS_I(inode)->cache_lru_lock);
				MSDOS_I(inode)->nr_caches--;
				spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
				return;
			}

			spin_lock(&MSDOS_I(inode)->cache_lru_lock);
			cache = fat_cache_merge(inode, new);
			if (cache != NULL) {
				MSDOS_I(inode)->nr_caches--;
				fat_cache_free(tmp);
				goto out_update_lru;
			}
			cache = tmp;
		} else {
			struct list_head *p = MSDOS_I(inode)->cache_lru.prev;
			cache = list_entry(p, struct fat_cache, cache_list);
		}
		cache->fcluster = new->fcluster;
		cache->dcluster = new->dcluster;
		cache->nr_contig = new->nr_contig;
	}
out_update_lru:
	fat_cache_update_lru(inode, cache);
out:
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}

/*
 * Cache invalidation occurs rarely, thus the LRU chain is not updated. It
 * fixes itself after a while.
 */
static void __fat_cache_inval_inode(struct inode *inode)
{
	struct msdos_inode_info *i = MSDOS_I(inode);
	struct fat_cache *cache;

	while (!list_empty(&i->cache_lru)) {
		cache = list_entry(i->cache_lru.next,
				   struct fat_cache, cache_list);
		list_del_init(&cache->cache_list);
		i->nr_caches--;
		fat_cache_free(cache);
	}
	/*
	 * Bump the id: any fat_cache_id snapshot taken before this point
	 * is now stale and will be rejected by fat_cache_add().
	 */
	i->cache_valid_id++;
	if (i->cache_valid_id == FAT_CACHE_VALID)
		i->cache_valid_id++;
}

void fat_cache_inval_inode(struct inode *inode)
{
	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	__fat_cache_inval_inode(inode);
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}

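/*
 * Helpers for building a fat_cache_id while walking the chain:
 * cache_contiguous() extends the run by one cluster and checks whether
 * @dclus is exactly the next on-disk cluster of that run (if not, the
 * caller restarts the run with cache_init()); cache_init() starts a new
 * candidate run at (@fclus, @dclus) with FAT_CACHE_VALID as its id,
 * which fat_cache_add() always accepts.
 */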
static inline int cache_contiguous(struct fat_cache_id *cid, int dclus)
{
	cid->nr_contig++;
	return ((cid->dcluster + cid->nr_contig) == dclus);
}

static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
{
	cid->id = FAT_CACHE_VALID;
	cid->fcluster = fclus;
	cid->dcluster = dclus;
	cid->nr_contig = 0;
}

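/*
 * Walk the cluster chain of @inode to file cluster @cluster, starting
 * from the nearest cached position.  Returns 0 on success with the
 * reached position in *fclus/*dclus, FAT_ENT_EOF if the chain ends
 * before @cluster, or a negative errno on I/O errors or a corrupted
 * chain.  Contiguous runs discovered during the walk are added to the
 * per-inode cache.
 */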
int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
{
	struct super_block *sb = inode->i_sb;
	const int limit = sb->s_maxbytes >> MSDOS_SB(sb)->cluster_bits;
	struct fat_entry fatent;
	struct fat_cache_id cid;
	int nr;

	BUG_ON(MSDOS_I(inode)->i_start == 0);

	*fclus = 0;
	*dclus = MSDOS_I(inode)->i_start;
	if (cluster == 0)
		return 0;

	if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) {
		/*
		 * Dummy entry, never contiguous; it is reinitialized by
		 * cache_init() once the walk finds a real run.
		 */
		cache_init(&cid, -1, -1);
	}

	fatent_init(&fatent);
	while (*fclus < cluster) {
		/* prevent an infinite loop over a corrupted cluster chain */
		if (*fclus > limit) {
			fat_fs_error_ratelimit(sb,
					"%s: detected the cluster chain loop"
					" (i_pos %lld)", __func__,
					MSDOS_I(inode)->i_pos);
			nr = -EIO;
			goto out;
		}

		nr = fat_ent_read(inode, &fatent, *dclus);
		if (nr < 0)
			goto out;
		else if (nr == FAT_ENT_FREE) {
			fat_fs_error_ratelimit(sb,
				       "%s: invalid cluster chain (i_pos %lld)",
				       __func__,
				       MSDOS_I(inode)->i_pos);
			nr = -EIO;
			goto out;
		} else if (nr == FAT_ENT_EOF) {
			fat_cache_add(inode, &cid);
			goto out;
		}
		(*fclus)++;
		*dclus = nr;
		if (!cache_contiguous(&cid, *dclus))
			cache_init(&cid, *fclus, *dclus);
	}
	nr = 0;
	fat_cache_add(inode, &cid);
out:
	fatent_brelse(&fatent);
	return nr;
}

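/*
 * Map file cluster @cluster to its on-disk cluster number.  Returns the
 * on-disk cluster, 0 if the file has no clusters allocated yet, or a
 * negative errno (a request beyond EOF is reported as -EIO).
 */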
static int fat_bmap_cluster(struct inode *inode, int cluster)
{
	struct super_block *sb = inode->i_sb;
	int ret, fclus, dclus;

	if (MSDOS_I(inode)->i_start == 0)
		return 0;

	ret = fat_get_cluster(inode, cluster, &fclus, &dclus);
	if (ret < 0)
		return ret;
	else if (ret == FAT_ENT_EOF) {
		fat_fs_error(sb, "%s: request beyond EOF (i_pos %lld)",
			     __func__, MSDOS_I(inode)->i_pos);
		return -EIO;
	}
	return dclus;
}

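/*
 * Map logical @sector of @inode to a device block number in *phys and
 * report in *mapped_blocks how many blocks are contiguous from there.
 * *phys is left at 0 for unmapped sectors (beyond EOF, or past the fixed
 * FAT12/16 root directory).  With @create, sectors that are already
 * allocated but not yet within i_size (up to ->mmu_private) are mapped
 * as well.
 */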
int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
	     unsigned long *mapped_blocks, int create)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const unsigned long blocksize = sb->s_blocksize;
	const unsigned char blocksize_bits = sb->s_blocksize_bits;
	sector_t last_block;
	int cluster, offset;

	*phys = 0;
	*mapped_blocks = 0;
	if ((sbi->fat_bits != 32) && (inode->i_ino == MSDOS_ROOT_INO)) {
		if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) {
			*phys = sector + sbi->dir_start;
			*mapped_blocks = 1;
		}
		return 0;
	}

	last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits;
	if (sector >= last_block) {
		if (!create)
			return 0;

		/*
		 * ->mmu_private may only be accessed on the allocation path
		 * (the caller must hold ->i_mutex).
		 */
		last_block = (MSDOS_I(inode)->mmu_private + (blocksize - 1))
			>> blocksize_bits;
		if (sector >= last_block)
			return 0;
	}

	cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits);
	offset  = sector & (sbi->sec_per_clus - 1);
	cluster = fat_bmap_cluster(inode, cluster);
	if (cluster < 0)
		return cluster;
	else if (cluster) {
		*phys = fat_clus_to_blknr(sbi, cluster) + offset;
		*mapped_blocks = sbi->sec_per_clus - offset;
		if (*mapped_blocks > last_block - sector)
			*mapped_blocks = last_block - sector;
	}
	return 0;
}