// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/fat/cache.c
 *
 *  Written 1992,1993 by Werner Almesberger
 *
 *  Mar 1999. AV. Changed the cache so that it uses the starting cluster
 *	instead of the inode number.
 *  May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers.
 */

#include <linux/slab.h>
#include "fat.h"

/* The maximum number of cache entries per inode; this must be > 0. */
#define FAT_MAX_CACHE	8

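/*
 * One cache entry describes a contiguous run of a file's cluster chain:
 * file-relative cluster "fcluster" lives in on-disk cluster "dcluster",
 * and the "nr_contig" clusters that follow it are contiguous on disk.
 * Entries hang off MSDOS_I(inode)->cache_lru in LRU order.
 */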
struct fat_cache {
	struct list_head cache_list;
	int nr_contig;	/* number of contiguous clusters */
	int fcluster;	/* cluster number in the file. */
	int dcluster;	/* cluster number on disk. */
};

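/*
 * A snapshot of a cluster-chain run, passed between fat_cache_lookup()
 * and fat_cache_add().  "id" records cache_valid_id at lookup time so
 * that fat_cache_add() can detect an invalidation in the meantime
 * (FAT_CACHE_VALID marks a fresh snapshot that was never looked up).
 */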
struct fat_cache_id {
	unsigned int id;
	int nr_contig;
	int fcluster;
	int dcluster;
};

static inline int fat_max_cache(struct inode *inode)
{
	return FAT_MAX_CACHE;
}

static struct kmem_cache *fat_cache_cachep;

static void init_once(void *foo)
{
	struct fat_cache *cache = (struct fat_cache *)foo;

	INIT_LIST_HEAD(&cache->cache_list);
}

int __init fat_cache_init(void)
{
	fat_cache_cachep = kmem_cache_create("fat_cache",
				sizeof(struct fat_cache),
				0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
				init_once);
	if (fat_cache_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void fat_cache_destroy(void)
{
	kmem_cache_destroy(fat_cache_cachep);
}

static inline struct fat_cache *fat_cache_alloc(struct inode *inode)
{
	return kmem_cache_alloc(fat_cache_cachep, GFP_NOFS);
}

static inline void fat_cache_free(struct fat_cache *cache)
{
	BUG_ON(!list_empty(&cache->cache_list));
	kmem_cache_free(fat_cache_cachep, cache);
}

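/* Move an entry to the front of the inode's LRU list, if it isn't there. */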
static inline void fat_cache_update_lru(struct inode *inode,
					struct fat_cache *cache)
{
	if (MSDOS_I(inode)->cache_lru.next != &cache->cache_list)
		list_move(&cache->cache_list, &MSDOS_I(inode)->cache_lru);
}

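/*
 * Look up file cluster "fclus" in the inode's cache.  On a hit, store a
 * snapshot of the best entry in *cid, return the (fcluster, dcluster)
 * pair closest to "fclus" through *cached_fclus/*cached_dclus, and
 * return that pair's offset from the entry's start.  Return -1 on a
 * miss; *cid and the outputs are then left untouched.
 */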
static int fat_cache_lookup(struct inode *inode, int fclus,
			    struct fat_cache_id *cid,
			    int *cached_fclus, int *cached_dclus)
{
	static struct fat_cache nohit = { .fcluster = 0, };

	struct fat_cache *hit = &nohit, *p;
	int offset = -1;

	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
		/* Find the entry for "fclus", or the nearest one preceding it. */
		if (p->fcluster <= fclus && hit->fcluster < p->fcluster) {
			hit = p;
			if ((hit->fcluster + hit->nr_contig) < fclus) {
				offset = hit->nr_contig;
			} else {
				offset = fclus - hit->fcluster;
				break;
			}
		}
	}
	if (hit != &nohit) {
		fat_cache_update_lru(inode, hit);

		cid->id = MSDOS_I(inode)->cache_valid_id;
		cid->nr_contig = hit->nr_contig;
		cid->fcluster = hit->fcluster;
		cid->dcluster = hit->dcluster;
		*cached_fclus = cid->fcluster + offset;
		*cached_dclus = cid->dcluster + offset;
	}
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

	return offset;
}

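/*
 * Try to extend an existing entry that starts at the same file cluster
 * as "new".  Returns the merged entry, or NULL if none matches.  The
 * caller must hold cache_lru_lock.
 */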
static struct fat_cache *fat_cache_merge(struct inode *inode,
					 struct fat_cache_id *new)
{
	struct fat_cache *p;

	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
		/* Find an entry covering the same part of the chain as "new". */
		if (p->fcluster == new->fcluster) {
			BUG_ON(p->dcluster != new->dcluster);
			if (new->nr_contig > p->nr_contig)
				p->nr_contig = new->nr_contig;
			return p;
		}
	}
	return NULL;
}

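/*
 * Insert the snapshot "new" into the inode's cache.  The lock is
 * dropped around the GFP_NOFS allocation, so the merge is retried
 * afterwards in case another task cached the same run meanwhile; if
 * the cache is already full, the least-recently-used entry (the tail
 * of cache_lru) is recycled instead of allocating.
 */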
static void fat_cache_add(struct inode *inode, struct fat_cache_id *new)
{
	struct fat_cache *cache, *tmp;

	if (new->fcluster == -1) /* dummy cache */
		return;

	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	if (new->id != FAT_CACHE_VALID &&
	    new->id != MSDOS_I(inode)->cache_valid_id)
		goto out;	/* this cache was invalidated */

	cache = fat_cache_merge(inode, new);
	if (cache == NULL) {
		if (MSDOS_I(inode)->nr_caches < fat_max_cache(inode)) {
			MSDOS_I(inode)->nr_caches++;
			spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

			tmp = fat_cache_alloc(inode);
			if (!tmp) {
				spin_lock(&MSDOS_I(inode)->cache_lru_lock);
				MSDOS_I(inode)->nr_caches--;
				spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
				return;
			}

			spin_lock(&MSDOS_I(inode)->cache_lru_lock);
			cache = fat_cache_merge(inode, new);
			if (cache != NULL) {
				MSDOS_I(inode)->nr_caches--;
				fat_cache_free(tmp);
				goto out_update_lru;
			}
			cache = tmp;
		} else {
			struct list_head *p = MSDOS_I(inode)->cache_lru.prev;

			cache = list_entry(p, struct fat_cache, cache_list);
		}
		cache->fcluster = new->fcluster;
		cache->dcluster = new->dcluster;
		cache->nr_contig = new->nr_contig;
	}
out_update_lru:
	fat_cache_update_lru(inode, cache);
out:
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}

/*
 * Cache invalidation occurs rarely, thus the LRU chain is not updated. It
 * fixes itself after a while.
 */
static void __fat_cache_inval_inode(struct inode *inode)
{
	struct msdos_inode_info *i = MSDOS_I(inode);
	struct fat_cache *cache;

	while (!list_empty(&i->cache_lru)) {
		cache = list_entry(i->cache_lru.next,
				   struct fat_cache, cache_list);
		list_del_init(&cache->cache_list);
		i->nr_caches--;
		fat_cache_free(cache);
	}
	/* Bump the id; snapshots stamped with an older id are now stale. */
	i->cache_valid_id++;
	if (i->cache_valid_id == FAT_CACHE_VALID)
		i->cache_valid_id++;
}

void fat_cache_inval_inode(struct inode *inode)
{
	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	__fat_cache_inval_inode(inode);
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}

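/*
 * Advance the snapshot by one cluster and report whether "dclus" still
 * continues the contiguous run.  Note that nr_contig is incremented
 * first, so the comparison is against the expected next disk cluster.
 */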
static inline int cache_contiguous(struct fat_cache_id *cid, int dclus)
{
	cid->nr_contig++;
	return ((cid->dcluster + cid->nr_contig) == dclus);
}

static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
{
	cid->id = FAT_CACHE_VALID;
	cid->fcluster = fclus;
	cid->dcluster = dclus;
	cid->nr_contig = 0;
}

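/*
 * Walk the FAT chain of "inode" from its first cluster to file cluster
 * "cluster", filling *fclus/*dclus with the file-relative and on-disk
 * cluster numbers reached.  The per-inode cache is used to skip ahead,
 * and every contiguous run discovered along the way is added back to
 * it.  Returns 0 on success, FAT_ENT_EOF if the chain ends before
 * "cluster", or -EIO on a corrupted chain.
 *
 * A hypothetical caller (an illustrative sketch, not code from this
 * file) might look like:
 *
 *	int fclus, dclus;
 *	int err = fat_get_cluster(inode, 3, &fclus, &dclus);
 *	if (err < 0)
 *		return err;		// I/O error or corruption
 *	if (err == FAT_ENT_EOF)
 *		...			// file has fewer than 4 clusters
 *	else
 *		...			// dclus is file cluster 3 on disk
 */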
int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const int limit = sb->s_maxbytes >> sbi->cluster_bits;
	struct fat_entry fatent;
	struct fat_cache_id cid;
	int nr;

	BUG_ON(MSDOS_I(inode)->i_start == 0);

	*fclus = 0;
	*dclus = MSDOS_I(inode)->i_start;
	if (!fat_valid_entry(sbi, *dclus)) {
		fat_fs_error_ratelimit(sb,
			"%s: invalid start cluster (i_pos %lld, start %08x)",
			__func__, MSDOS_I(inode)->i_pos, *dclus);
		return -EIO;
	}
	if (cluster == 0)
		return 0;

	if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) {
		/*
		 * Dummy cache: never contiguous, and never added.  It is
		 * reinitialized by cache_init() below.
		 */
		cache_init(&cid, -1, -1);
	}

	fatent_init(&fatent);
	while (*fclus < cluster) {
		/* Prevent an infinite loop on a corrupted cluster chain. */
		if (*fclus > limit) {
			fat_fs_error_ratelimit(sb,
				"%s: detected the cluster chain loop (i_pos %lld)",
				__func__, MSDOS_I(inode)->i_pos);
			nr = -EIO;
			goto out;
		}

		nr = fat_ent_read(inode, &fatent, *dclus);
		if (nr < 0)
			goto out;
		else if (nr == FAT_ENT_FREE) {
			fat_fs_error_ratelimit(sb,
				"%s: invalid cluster chain (i_pos %lld)",
				__func__, MSDOS_I(inode)->i_pos);
			nr = -EIO;
			goto out;
		} else if (nr == FAT_ENT_EOF) {
			fat_cache_add(inode, &cid);
			goto out;
		}
		(*fclus)++;
		*dclus = nr;
		if (!cache_contiguous(&cid, *dclus))
			cache_init(&cid, *fclus, *dclus);
	}
	nr = 0;
	fat_cache_add(inode, &cid);
out:
	fatent_brelse(&fatent);
	return nr;
}

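/*
 * Resolve file cluster "cluster" to its on-disk cluster number.
 * Returns 0 if the file has no clusters at all, a negative errno on
 * failure (a request beyond EOF is treated as -EIO), and the on-disk
 * cluster number otherwise.
 */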
static int fat_bmap_cluster(struct inode *inode, int cluster)
{
	struct super_block *sb = inode->i_sb;
	int ret, fclus, dclus;

	if (MSDOS_I(inode)->i_start == 0)
		return 0;

	ret = fat_get_cluster(inode, cluster, &fclus, &dclus);
	if (ret < 0)
		return ret;
	else if (ret == FAT_ENT_EOF) {
		fat_fs_error(sb, "%s: request beyond EOF (i_pos %lld)",
			     __func__, MSDOS_I(inode)->i_pos);
		return -EIO;
	}
	return dclus;
}

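/*
 * Map "sector" (a file-relative block) to the disk block *bmap, and
 * report in *mapped_blocks how many blocks are contiguous from there,
 * clamped to "last_block".  The sector is split into a cluster index
 * and an in-cluster offset; e.g. with four 512-byte sectors per
 * 2048-byte cluster, sector 13 is cluster 3 (13 >> 2), offset 1
 * (13 & 3).
 */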
int fat_get_mapped_cluster(struct inode *inode, sector_t sector,
			   sector_t last_block,
			   unsigned long *mapped_blocks, sector_t *bmap)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	int cluster, offset;

	cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits);
	offset  = sector & (sbi->sec_per_clus - 1);
	cluster = fat_bmap_cluster(inode, cluster);
	if (cluster < 0)
		return cluster;
	else if (cluster) {
		*bmap = fat_clus_to_blknr(sbi, cluster) + offset;
		*mapped_blocks = sbi->sec_per_clus - offset;
		if (*mapped_blocks > last_block - sector)
			*mapped_blocks = last_block - sector;
	}

	return 0;
}

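/*
 * Return 1 if "sector" lies beyond EOF.  Without "create", EOF is
 * i_size; on the allocation path blocks may already exist past i_size,
 * so mmu_private is checked as well.  *last_block is set to the first
 * block past the applicable limit in either case.
 */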
static int is_exceed_eof(struct inode *inode, sector_t sector,
			 sector_t *last_block, int create)
{
	struct super_block *sb = inode->i_sb;
	const unsigned long blocksize = sb->s_blocksize;
	const unsigned char blocksize_bits = sb->s_blocksize_bits;

	*last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits;
	if (sector >= *last_block) {
		if (!create)
			return 1;

		/*
		 * ->mmu_private may only be accessed on the allocation
		 * path (the caller must hold ->i_mutex).
		 */
		*last_block = (MSDOS_I(inode)->mmu_private + (blocksize - 1))
			>> blocksize_bits;
		if (sector >= *last_block)
			return 1;
	}

	return 0;
}

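/*
 * Map file-relative block "sector" to a physical block in *phys, with
 * the number of contiguously mapped blocks in *mapped_blocks.  *phys
 * stays 0 when the block is not mapped (a hole, or beyond EOF).  The
 * fixed root directory of a FAT12/16 volume is mapped directly, since
 * it lies outside the cluster area.
 *
 * A hypothetical caller (an illustrative sketch, not code from this
 * file):
 *
 *	sector_t phys;
 *	unsigned long mapped;
 *	int err = fat_bmap(inode, sector, &phys, &mapped, 0, false);
 *	if (err)
 *		return err;
 *	if (!phys)
 *		...		// hole or past EOF: nothing on disk
 */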
357  
358  int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
359  	     unsigned long *mapped_blocks, int create, bool from_bmap)
360  {
361  	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
362  	sector_t last_block;
363  
364  	*phys = 0;
365  	*mapped_blocks = 0;
366  	if (!is_fat32(sbi) && (inode->i_ino == MSDOS_ROOT_INO)) {
367  		if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) {
368  			*phys = sector + sbi->dir_start;
369  			*mapped_blocks = 1;
370  		}
371  		return 0;
372  	}
373  
374  	if (!from_bmap) {
375  		if (is_exceed_eof(inode, sector, &last_block, create))
376  			return 0;
377  	} else {
378  		last_block = inode->i_blocks >>
379  				(inode->i_sb->s_blocksize_bits - 9);
380  		if (sector >= last_block)
381  			return 0;
382  	}
383  
384  	return fat_get_mapped_cluster(inode, sector, last_block, mapped_blocks,
385  				      phys);
386  }
387