1 /* 2 * linux/fs/fat/cache.c 3 * 4 * Written 1992,1993 by Werner Almesberger 5 * 6 * Mar 1999. AV. Changed cache, so that it uses the starting cluster instead 7 * of inode number. 8 * May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers. 9 */ 10 11 #include <linux/slab.h> 12 #include "fat.h" 13 14 /* this must be > 0. */ 15 #define FAT_MAX_CACHE 8 16 17 struct fat_cache { 18 struct list_head cache_list; 19 int nr_contig; /* number of contiguous clusters */ 20 int fcluster; /* cluster number in the file. */ 21 int dcluster; /* cluster number on disk. */ 22 }; 23 24 struct fat_cache_id { 25 unsigned int id; 26 int nr_contig; 27 int fcluster; 28 int dcluster; 29 }; 30 31 static inline int fat_max_cache(struct inode *inode) 32 { 33 return FAT_MAX_CACHE; 34 } 35 36 static struct kmem_cache *fat_cache_cachep; 37 38 static void init_once(void *foo) 39 { 40 struct fat_cache *cache = (struct fat_cache *)foo; 41 42 INIT_LIST_HEAD(&cache->cache_list); 43 } 44 45 int __init fat_cache_init(void) 46 { 47 fat_cache_cachep = kmem_cache_create("fat_cache", 48 sizeof(struct fat_cache), 49 0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, 50 init_once); 51 if (fat_cache_cachep == NULL) 52 return -ENOMEM; 53 return 0; 54 } 55 56 void fat_cache_destroy(void) 57 { 58 kmem_cache_destroy(fat_cache_cachep); 59 } 60 61 static inline struct fat_cache *fat_cache_alloc(struct inode *inode) 62 { 63 return kmem_cache_alloc(fat_cache_cachep, GFP_NOFS); 64 } 65 66 static inline void fat_cache_free(struct fat_cache *cache) 67 { 68 BUG_ON(!list_empty(&cache->cache_list)); 69 kmem_cache_free(fat_cache_cachep, cache); 70 } 71 72 static inline void fat_cache_update_lru(struct inode *inode, 73 struct fat_cache *cache) 74 { 75 if (MSDOS_I(inode)->cache_lru.next != &cache->cache_list) 76 list_move(&cache->cache_list, &MSDOS_I(inode)->cache_lru); 77 } 78 79 static int fat_cache_lookup(struct inode *inode, int fclus, 80 struct fat_cache_id *cid, 81 int *cached_fclus, int *cached_dclus) 82 { 83 static struct fat_cache nohit = { .fcluster = 0, }; 84 85 struct fat_cache *hit = &nohit, *p; 86 int offset = -1; 87 88 spin_lock(&MSDOS_I(inode)->cache_lru_lock); 89 list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) { 90 /* Find the cache of "fclus" or nearest cache. */ 91 if (p->fcluster <= fclus && hit->fcluster < p->fcluster) { 92 hit = p; 93 if ((hit->fcluster + hit->nr_contig) < fclus) { 94 offset = hit->nr_contig; 95 } else { 96 offset = fclus - hit->fcluster; 97 break; 98 } 99 } 100 } 101 if (hit != &nohit) { 102 fat_cache_update_lru(inode, hit); 103 104 cid->id = MSDOS_I(inode)->cache_valid_id; 105 cid->nr_contig = hit->nr_contig; 106 cid->fcluster = hit->fcluster; 107 cid->dcluster = hit->dcluster; 108 *cached_fclus = cid->fcluster + offset; 109 *cached_dclus = cid->dcluster + offset; 110 } 111 spin_unlock(&MSDOS_I(inode)->cache_lru_lock); 112 113 return offset; 114 } 115 116 static struct fat_cache *fat_cache_merge(struct inode *inode, 117 struct fat_cache_id *new) 118 { 119 struct fat_cache *p; 120 121 list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) { 122 /* Find the same part as "new" in cluster-chain. */ 123 if (p->fcluster == new->fcluster) { 124 BUG_ON(p->dcluster != new->dcluster); 125 if (new->nr_contig > p->nr_contig) 126 p->nr_contig = new->nr_contig; 127 return p; 128 } 129 } 130 return NULL; 131 } 132 133 static void fat_cache_add(struct inode *inode, struct fat_cache_id *new) 134 { 135 struct fat_cache *cache, *tmp; 136 137 if (new->fcluster == -1) /* dummy cache */ 138 return; 139 140 spin_lock(&MSDOS_I(inode)->cache_lru_lock); 141 if (new->id != FAT_CACHE_VALID && 142 new->id != MSDOS_I(inode)->cache_valid_id) 143 goto out; /* this cache was invalidated */ 144 145 cache = fat_cache_merge(inode, new); 146 if (cache == NULL) { 147 if (MSDOS_I(inode)->nr_caches < fat_max_cache(inode)) { 148 MSDOS_I(inode)->nr_caches++; 149 spin_unlock(&MSDOS_I(inode)->cache_lru_lock); 150 151 tmp = fat_cache_alloc(inode); 152 if (!tmp) { 153 spin_lock(&MSDOS_I(inode)->cache_lru_lock); 154 MSDOS_I(inode)->nr_caches--; 155 spin_unlock(&MSDOS_I(inode)->cache_lru_lock); 156 return; 157 } 158 159 spin_lock(&MSDOS_I(inode)->cache_lru_lock); 160 cache = fat_cache_merge(inode, new); 161 if (cache != NULL) { 162 MSDOS_I(inode)->nr_caches--; 163 fat_cache_free(tmp); 164 goto out_update_lru; 165 } 166 cache = tmp; 167 } else { 168 struct list_head *p = MSDOS_I(inode)->cache_lru.prev; 169 cache = list_entry(p, struct fat_cache, cache_list); 170 } 171 cache->fcluster = new->fcluster; 172 cache->dcluster = new->dcluster; 173 cache->nr_contig = new->nr_contig; 174 } 175 out_update_lru: 176 fat_cache_update_lru(inode, cache); 177 out: 178 spin_unlock(&MSDOS_I(inode)->cache_lru_lock); 179 } 180 181 /* 182 * Cache invalidation occurs rarely, thus the LRU chain is not updated. It 183 * fixes itself after a while. 184 */ 185 static void __fat_cache_inval_inode(struct inode *inode) 186 { 187 struct msdos_inode_info *i = MSDOS_I(inode); 188 struct fat_cache *cache; 189 190 while (!list_empty(&i->cache_lru)) { 191 cache = list_entry(i->cache_lru.next, 192 struct fat_cache, cache_list); 193 list_del_init(&cache->cache_list); 194 i->nr_caches--; 195 fat_cache_free(cache); 196 } 197 /* Update. The copy of caches before this id is discarded. */ 198 i->cache_valid_id++; 199 if (i->cache_valid_id == FAT_CACHE_VALID) 200 i->cache_valid_id++; 201 } 202 203 void fat_cache_inval_inode(struct inode *inode) 204 { 205 spin_lock(&MSDOS_I(inode)->cache_lru_lock); 206 __fat_cache_inval_inode(inode); 207 spin_unlock(&MSDOS_I(inode)->cache_lru_lock); 208 } 209 210 static inline int cache_contiguous(struct fat_cache_id *cid, int dclus) 211 { 212 cid->nr_contig++; 213 return ((cid->dcluster + cid->nr_contig) == dclus); 214 } 215 216 static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus) 217 { 218 cid->id = FAT_CACHE_VALID; 219 cid->fcluster = fclus; 220 cid->dcluster = dclus; 221 cid->nr_contig = 0; 222 } 223 224 int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus) 225 { 226 struct super_block *sb = inode->i_sb; 227 const int limit = sb->s_maxbytes >> MSDOS_SB(sb)->cluster_bits; 228 struct fat_entry fatent; 229 struct fat_cache_id cid; 230 int nr; 231 232 BUG_ON(MSDOS_I(inode)->i_start == 0); 233 234 *fclus = 0; 235 *dclus = MSDOS_I(inode)->i_start; 236 if (cluster == 0) 237 return 0; 238 239 if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) { 240 /* 241 * dummy, always not contiguous 242 * This is reinitialized by cache_init(), later. 243 */ 244 cache_init(&cid, -1, -1); 245 } 246 247 fatent_init(&fatent); 248 while (*fclus < cluster) { 249 /* prevent the infinite loop of cluster chain */ 250 if (*fclus > limit) { 251 fat_fs_error_ratelimit(sb, 252 "%s: detected the cluster chain loop" 253 " (i_pos %lld)", __func__, 254 MSDOS_I(inode)->i_pos); 255 nr = -EIO; 256 goto out; 257 } 258 259 nr = fat_ent_read(inode, &fatent, *dclus); 260 if (nr < 0) 261 goto out; 262 else if (nr == FAT_ENT_FREE) { 263 fat_fs_error_ratelimit(sb, 264 "%s: invalid cluster chain (i_pos %lld)", 265 __func__, 266 MSDOS_I(inode)->i_pos); 267 nr = -EIO; 268 goto out; 269 } else if (nr == FAT_ENT_EOF) { 270 fat_cache_add(inode, &cid); 271 goto out; 272 } 273 (*fclus)++; 274 *dclus = nr; 275 if (!cache_contiguous(&cid, *dclus)) 276 cache_init(&cid, *fclus, *dclus); 277 } 278 nr = 0; 279 fat_cache_add(inode, &cid); 280 out: 281 fatent_brelse(&fatent); 282 return nr; 283 } 284 285 static int fat_bmap_cluster(struct inode *inode, int cluster) 286 { 287 struct super_block *sb = inode->i_sb; 288 int ret, fclus, dclus; 289 290 if (MSDOS_I(inode)->i_start == 0) 291 return 0; 292 293 ret = fat_get_cluster(inode, cluster, &fclus, &dclus); 294 if (ret < 0) 295 return ret; 296 else if (ret == FAT_ENT_EOF) { 297 fat_fs_error(sb, "%s: request beyond EOF (i_pos %lld)", 298 __func__, MSDOS_I(inode)->i_pos); 299 return -EIO; 300 } 301 return dclus; 302 } 303 304 int fat_get_mapped_cluster(struct inode *inode, sector_t sector, 305 sector_t last_block, 306 unsigned long *mapped_blocks, sector_t *bmap) 307 { 308 struct super_block *sb = inode->i_sb; 309 struct msdos_sb_info *sbi = MSDOS_SB(sb); 310 int cluster, offset; 311 312 cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits); 313 offset = sector & (sbi->sec_per_clus - 1); 314 cluster = fat_bmap_cluster(inode, cluster); 315 if (cluster < 0) 316 return cluster; 317 else if (cluster) { 318 *bmap = fat_clus_to_blknr(sbi, cluster) + offset; 319 *mapped_blocks = sbi->sec_per_clus - offset; 320 if (*mapped_blocks > last_block - sector) 321 *mapped_blocks = last_block - sector; 322 } 323 324 return 0; 325 } 326 327 static int is_exceed_eof(struct inode *inode, sector_t sector, 328 sector_t *last_block, int create) 329 { 330 struct super_block *sb = inode->i_sb; 331 const unsigned long blocksize = sb->s_blocksize; 332 const unsigned char blocksize_bits = sb->s_blocksize_bits; 333 334 *last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits; 335 if (sector >= *last_block) { 336 if (!create) 337 return 1; 338 339 /* 340 * ->mmu_private can access on only allocation path. 341 * (caller must hold ->i_mutex) 342 */ 343 *last_block = (MSDOS_I(inode)->mmu_private + (blocksize - 1)) 344 >> blocksize_bits; 345 if (sector >= *last_block) 346 return 1; 347 } 348 349 return 0; 350 } 351 352 int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys, 353 unsigned long *mapped_blocks, int create, bool from_bmap) 354 { 355 struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); 356 sector_t last_block; 357 358 *phys = 0; 359 *mapped_blocks = 0; 360 if ((sbi->fat_bits != 32) && (inode->i_ino == MSDOS_ROOT_INO)) { 361 if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) { 362 *phys = sector + sbi->dir_start; 363 *mapped_blocks = 1; 364 } 365 return 0; 366 } 367 368 if (!from_bmap) { 369 if (is_exceed_eof(inode, sector, &last_block, create)) 370 return 0; 371 } else { 372 last_block = inode->i_blocks >> 373 (inode->i_sb->s_blocksize_bits - 9); 374 if (sector >= last_block) 375 return 0; 376 } 377 378 return fat_get_mapped_cluster(inode, sector, last_block, mapped_blocks, 379 phys); 380 } 381