1 /* 2 * linux/fs/fat/cache.c 3 * 4 * Written 1992,1993 by Werner Almesberger 5 * 6 * Mar 1999. AV. Changed cache, so that it uses the starting cluster instead 7 * of inode number. 8 * May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers. 9 */ 10 11 #include <linux/fs.h> 12 #include <linux/msdos_fs.h> 13 #include <linux/buffer_head.h> 14 15 /* this must be > 0. */ 16 #define FAT_MAX_CACHE 8 17 18 struct fat_cache { 19 struct list_head cache_list; 20 int nr_contig; /* number of contiguous clusters */ 21 int fcluster; /* cluster number in the file. */ 22 int dcluster; /* cluster number on disk. */ 23 }; 24 25 struct fat_cache_id { 26 unsigned int id; 27 int nr_contig; 28 int fcluster; 29 int dcluster; 30 }; 31 32 static inline int fat_max_cache(struct inode *inode) 33 { 34 return FAT_MAX_CACHE; 35 } 36 37 static kmem_cache_t *fat_cache_cachep; 38 39 static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) 40 { 41 struct fat_cache *cache = (struct fat_cache *)foo; 42 43 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == 44 SLAB_CTOR_CONSTRUCTOR) 45 INIT_LIST_HEAD(&cache->cache_list); 46 } 47 48 int __init fat_cache_init(void) 49 { 50 fat_cache_cachep = kmem_cache_create("fat_cache", 51 sizeof(struct fat_cache), 52 0, SLAB_RECLAIM_ACCOUNT, 53 init_once, NULL); 54 if (fat_cache_cachep == NULL) 55 return -ENOMEM; 56 return 0; 57 } 58 59 void fat_cache_destroy(void) 60 { 61 if (kmem_cache_destroy(fat_cache_cachep)) 62 printk(KERN_INFO "fat_cache: not all structures were freed\n"); 63 } 64 65 static inline struct fat_cache *fat_cache_alloc(struct inode *inode) 66 { 67 return kmem_cache_alloc(fat_cache_cachep, SLAB_KERNEL); 68 } 69 70 static inline void fat_cache_free(struct fat_cache *cache) 71 { 72 BUG_ON(!list_empty(&cache->cache_list)); 73 kmem_cache_free(fat_cache_cachep, cache); 74 } 75 76 static inline void fat_cache_update_lru(struct inode *inode, 77 struct fat_cache *cache) 78 { 79 if (MSDOS_I(inode)->cache_lru.next != &cache->cache_list) 80 list_move(&cache->cache_list, &MSDOS_I(inode)->cache_lru); 81 } 82 83 static int fat_cache_lookup(struct inode *inode, int fclus, 84 struct fat_cache_id *cid, 85 int *cached_fclus, int *cached_dclus) 86 { 87 static struct fat_cache nohit = { .fcluster = 0, }; 88 89 struct fat_cache *hit = &nohit, *p; 90 int offset = -1; 91 92 spin_lock(&MSDOS_I(inode)->cache_lru_lock); 93 list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) { 94 /* Find the cache of "fclus" or nearest cache. */ 95 if (p->fcluster <= fclus && hit->fcluster < p->fcluster) { 96 hit = p; 97 if ((hit->fcluster + hit->nr_contig) < fclus) { 98 offset = hit->nr_contig; 99 } else { 100 offset = fclus - hit->fcluster; 101 break; 102 } 103 } 104 } 105 if (hit != &nohit) { 106 fat_cache_update_lru(inode, hit); 107 108 cid->id = MSDOS_I(inode)->cache_valid_id; 109 cid->nr_contig = hit->nr_contig; 110 cid->fcluster = hit->fcluster; 111 cid->dcluster = hit->dcluster; 112 *cached_fclus = cid->fcluster + offset; 113 *cached_dclus = cid->dcluster + offset; 114 } 115 spin_unlock(&MSDOS_I(inode)->cache_lru_lock); 116 117 return offset; 118 } 119 120 static struct fat_cache *fat_cache_merge(struct inode *inode, 121 struct fat_cache_id *new) 122 { 123 struct fat_cache *p; 124 125 list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) { 126 /* Find the same part as "new" in cluster-chain. */ 127 if (p->fcluster == new->fcluster) { 128 BUG_ON(p->dcluster != new->dcluster); 129 if (new->nr_contig > p->nr_contig) 130 p->nr_contig = new->nr_contig; 131 return p; 132 } 133 } 134 return NULL; 135 } 136 137 static void fat_cache_add(struct inode *inode, struct fat_cache_id *new) 138 { 139 struct fat_cache *cache, *tmp; 140 141 if (new->fcluster == -1) /* dummy cache */ 142 return; 143 144 spin_lock(&MSDOS_I(inode)->cache_lru_lock); 145 if (new->id != FAT_CACHE_VALID && 146 new->id != MSDOS_I(inode)->cache_valid_id) 147 goto out; /* this cache was invalidated */ 148 149 cache = fat_cache_merge(inode, new); 150 if (cache == NULL) { 151 if (MSDOS_I(inode)->nr_caches < fat_max_cache(inode)) { 152 MSDOS_I(inode)->nr_caches++; 153 spin_unlock(&MSDOS_I(inode)->cache_lru_lock); 154 155 tmp = fat_cache_alloc(inode); 156 spin_lock(&MSDOS_I(inode)->cache_lru_lock); 157 cache = fat_cache_merge(inode, new); 158 if (cache != NULL) { 159 MSDOS_I(inode)->nr_caches--; 160 fat_cache_free(tmp); 161 goto out_update_lru; 162 } 163 cache = tmp; 164 } else { 165 struct list_head *p = MSDOS_I(inode)->cache_lru.prev; 166 cache = list_entry(p, struct fat_cache, cache_list); 167 } 168 cache->fcluster = new->fcluster; 169 cache->dcluster = new->dcluster; 170 cache->nr_contig = new->nr_contig; 171 } 172 out_update_lru: 173 fat_cache_update_lru(inode, cache); 174 out: 175 spin_unlock(&MSDOS_I(inode)->cache_lru_lock); 176 } 177 178 /* 179 * Cache invalidation occurs rarely, thus the LRU chain is not updated. It 180 * fixes itself after a while. 181 */ 182 static void __fat_cache_inval_inode(struct inode *inode) 183 { 184 struct msdos_inode_info *i = MSDOS_I(inode); 185 struct fat_cache *cache; 186 187 while (!list_empty(&i->cache_lru)) { 188 cache = list_entry(i->cache_lru.next, struct fat_cache, cache_list); 189 list_del_init(&cache->cache_list); 190 i->nr_caches--; 191 fat_cache_free(cache); 192 } 193 /* Update. The copy of caches before this id is discarded. */ 194 i->cache_valid_id++; 195 if (i->cache_valid_id == FAT_CACHE_VALID) 196 i->cache_valid_id++; 197 } 198 199 void fat_cache_inval_inode(struct inode *inode) 200 { 201 spin_lock(&MSDOS_I(inode)->cache_lru_lock); 202 __fat_cache_inval_inode(inode); 203 spin_unlock(&MSDOS_I(inode)->cache_lru_lock); 204 } 205 206 static inline int cache_contiguous(struct fat_cache_id *cid, int dclus) 207 { 208 cid->nr_contig++; 209 return ((cid->dcluster + cid->nr_contig) == dclus); 210 } 211 212 static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus) 213 { 214 cid->id = FAT_CACHE_VALID; 215 cid->fcluster = fclus; 216 cid->dcluster = dclus; 217 cid->nr_contig = 0; 218 } 219 220 int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus) 221 { 222 struct super_block *sb = inode->i_sb; 223 const int limit = sb->s_maxbytes >> MSDOS_SB(sb)->cluster_bits; 224 struct fat_entry fatent; 225 struct fat_cache_id cid; 226 int nr; 227 228 BUG_ON(MSDOS_I(inode)->i_start == 0); 229 230 *fclus = 0; 231 *dclus = MSDOS_I(inode)->i_start; 232 if (cluster == 0) 233 return 0; 234 235 if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) { 236 /* 237 * dummy, always not contiguous 238 * This is reinitialized by cache_init(), later. 239 */ 240 cache_init(&cid, -1, -1); 241 } 242 243 fatent_init(&fatent); 244 while (*fclus < cluster) { 245 /* prevent the infinite loop of cluster chain */ 246 if (*fclus > limit) { 247 fat_fs_panic(sb, "%s: detected the cluster chain loop" 248 " (i_pos %lld)", __FUNCTION__, 249 MSDOS_I(inode)->i_pos); 250 nr = -EIO; 251 goto out; 252 } 253 254 nr = fat_ent_read(inode, &fatent, *dclus); 255 if (nr < 0) 256 goto out; 257 else if (nr == FAT_ENT_FREE) { 258 fat_fs_panic(sb, "%s: invalid cluster chain" 259 " (i_pos %lld)", __FUNCTION__, 260 MSDOS_I(inode)->i_pos); 261 nr = -EIO; 262 goto out; 263 } else if (nr == FAT_ENT_EOF) { 264 fat_cache_add(inode, &cid); 265 goto out; 266 } 267 (*fclus)++; 268 *dclus = nr; 269 if (!cache_contiguous(&cid, *dclus)) 270 cache_init(&cid, *fclus, *dclus); 271 } 272 nr = 0; 273 fat_cache_add(inode, &cid); 274 out: 275 fatent_brelse(&fatent); 276 return nr; 277 } 278 279 static int fat_bmap_cluster(struct inode *inode, int cluster) 280 { 281 struct super_block *sb = inode->i_sb; 282 int ret, fclus, dclus; 283 284 if (MSDOS_I(inode)->i_start == 0) 285 return 0; 286 287 ret = fat_get_cluster(inode, cluster, &fclus, &dclus); 288 if (ret < 0) 289 return ret; 290 else if (ret == FAT_ENT_EOF) { 291 fat_fs_panic(sb, "%s: request beyond EOF (i_pos %lld)", 292 __FUNCTION__, MSDOS_I(inode)->i_pos); 293 return -EIO; 294 } 295 return dclus; 296 } 297 298 int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys) 299 { 300 struct super_block *sb = inode->i_sb; 301 struct msdos_sb_info *sbi = MSDOS_SB(sb); 302 sector_t last_block; 303 int cluster, offset; 304 305 *phys = 0; 306 if ((sbi->fat_bits != 32) && (inode->i_ino == MSDOS_ROOT_INO)) { 307 if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) 308 *phys = sector + sbi->dir_start; 309 return 0; 310 } 311 last_block = (MSDOS_I(inode)->mmu_private + (sb->s_blocksize - 1)) 312 >> sb->s_blocksize_bits; 313 if (sector >= last_block) 314 return 0; 315 316 cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits); 317 offset = sector & (sbi->sec_per_clus - 1); 318 cluster = fat_bmap_cluster(inode, cluster); 319 if (cluster < 0) 320 return cluster; 321 else if (cluster) 322 *phys = fat_clus_to_blknr(sbi, cluster) + offset; 323 return 0; 324 } 325