// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, OGAWA Hirofumi
 */

#include <linux/blkdev.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev-defs.h>
#include "fat.h"

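/*
 * Method table abstracting the per-variant FAT entry format; one
 * instance each for FAT12, FAT16 and FAT32 (fat12_ops/fat16_ops/
 * fat32_ops below).
 */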
struct fatent_operations {
	void (*ent_blocknr)(struct super_block *, int, int *, sector_t *);
	void (*ent_set_ptr)(struct fat_entry *, int);
	int (*ent_bread)(struct super_block *, struct fat_entry *,
			 int, sector_t);
	int (*ent_get)(struct fat_entry *);
	void (*ent_put)(struct fat_entry *, int);
	int (*ent_next)(struct fat_entry *);
};

static DEFINE_SPINLOCK(fat12_entry_lock);

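/*
 * A FAT12 entry occupies 12 bits, i.e. 1.5 bytes, so the byte offset of
 * entry N is N + N/2.  For example, entry 4 starts at byte 6 and entry 5
 * shares byte 7 with it: even entries use the low nibble of the shared
 * byte, odd entries the high nibble.
 */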
static void fat12_ent_blocknr(struct super_block *sb, int entry,
			      int *offset, sector_t *blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	int bytes = entry + (entry >> 1);
	WARN_ON(!fat_valid_entry(sbi, entry));
	*offset = bytes & (sb->s_blocksize - 1);
	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}

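/*
 * Common blocknr calculation for the fixed-width variants: fatent_shift
 * is 1 for FAT16 (2-byte entries) and 2 for FAT32 (4-byte entries), as
 * set up in fat_ent_access_init().
 */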
static void fat_ent_blocknr(struct super_block *sb, int entry,
			    int *offset, sector_t *blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	int bytes = (entry << sbi->fatent_shift);
	WARN_ON(!fat_valid_entry(sbi, entry));
	*offset = bytes & (sb->s_blocksize - 1);
	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}

static void fat12_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	struct buffer_head **bhs = fatent->bhs;
	if (fatent->nr_bhs == 1) {
		WARN_ON(offset >= (bhs[0]->b_size - 1));
		fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
		fatent->u.ent12_p[1] = bhs[0]->b_data + (offset + 1);
	} else {
		WARN_ON(offset != (bhs[0]->b_size - 1));
		fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
		fatent->u.ent12_p[1] = bhs[1]->b_data;
	}
}

static void fat16_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	WARN_ON(offset & (2 - 1));
	fatent->u.ent16_p = (__le16 *)(fatent->bhs[0]->b_data + offset);
}

static void fat32_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	WARN_ON(offset & (4 - 1));
	fatent->u.ent32_p = (__le32 *)(fatent->bhs[0]->b_data + offset);
}

static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent,
			   int offset, sector_t blocknr)
{
	struct buffer_head **bhs = fatent->bhs;

	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
	fatent->fat_inode = MSDOS_SB(sb)->fat_inode;

	bhs[0] = sb_bread(sb, blocknr);
	if (!bhs[0])
		goto err;

	if ((offset + 1) < sb->s_blocksize)
		fatent->nr_bhs = 1;
	else {
		/* This entry straddles a block boundary; it needs the next block */
		blocknr++;
		bhs[1] = sb_bread(sb, blocknr);
		if (!bhs[1])
			goto err_brelse;
		fatent->nr_bhs = 2;
	}
	fat12_ent_set_ptr(fatent, offset);
	return 0;

err_brelse:
	brelse(bhs[0]);
err:
	fat_msg_ratelimit(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
			  (llu)blocknr);
	return -EIO;
}

static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent,
			 int offset, sector_t blocknr)
{
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;

	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
	fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
	fatent->bhs[0] = sb_bread(sb, blocknr);
	if (!fatent->bhs[0]) {
		fat_msg_ratelimit(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
				  (llu)blocknr);
		return -EIO;
	}
	fatent->nr_bhs = 1;
	ops->ent_set_ptr(fatent, offset);
	return 0;
}

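/*
 * ent12_p[0]/[1] point at the two bytes holding the entry (possibly in
 * different buffer_heads).  An odd entry is stored in the high nibble
 * of the first byte plus all of the second; an even entry in all of the
 * first byte plus the low nibble of the second.
 */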
static int fat12_ent_get(struct fat_entry *fatent)
{
	u8 **ent12_p = fatent->u.ent12_p;
	int next;

	spin_lock(&fat12_entry_lock);
	if (fatent->entry & 1)
		next = (*ent12_p[0] >> 4) | (*ent12_p[1] << 4);
	else
		next = (*ent12_p[1] << 8) | *ent12_p[0];
	spin_unlock(&fat12_entry_lock);

	next &= 0x0fff;
	if (next >= BAD_FAT12)
		next = FAT_ENT_EOF;
	return next;
}

static int fat16_ent_get(struct fat_entry *fatent)
{
	int next = le16_to_cpu(*fatent->u.ent16_p);
	WARN_ON((unsigned long)fatent->u.ent16_p & (2 - 1));
	if (next >= BAD_FAT16)
		next = FAT_ENT_EOF;
	return next;
}

static int fat32_ent_get(struct fat_entry *fatent)
{
	int next = le32_to_cpu(*fatent->u.ent32_p) & 0x0fffffff;
	WARN_ON((unsigned long)fatent->u.ent32_p & (4 - 1));
	if (next >= BAD_FAT32)
		next = FAT_ENT_EOF;
	return next;
}

static void fat12_ent_put(struct fat_entry *fatent, int new)
{
	u8 **ent12_p = fatent->u.ent12_p;

	if (new == FAT_ENT_EOF)
		new = EOF_FAT12;

	spin_lock(&fat12_entry_lock);
	if (fatent->entry & 1) {
		*ent12_p[0] = (new << 4) | (*ent12_p[0] & 0x0f);
		*ent12_p[1] = new >> 4;
	} else {
		*ent12_p[0] = new & 0xff;
		*ent12_p[1] = (*ent12_p[1] & 0xf0) | (new >> 8);
	}
	spin_unlock(&fat12_entry_lock);

	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
	if (fatent->nr_bhs == 2)
		mark_buffer_dirty_inode(fatent->bhs[1], fatent->fat_inode);
}

static void fat16_ent_put(struct fat_entry *fatent, int new)
{
	if (new == FAT_ENT_EOF)
		new = EOF_FAT16;

	*fatent->u.ent16_p = cpu_to_le16(new);
	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}

static void fat32_ent_put(struct fat_entry *fatent, int new)
{
	WARN_ON(new & 0xf0000000);
	new |= le32_to_cpu(*fatent->u.ent32_p) & ~0x0fffffff;
	*fatent->u.ent32_p = cpu_to_le32(new);
	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}

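/*
 * Advance ent12_p to the next 12-bit entry.  When the previous entry
 * straddled a block boundary, the second buffer_head becomes the first
 * and the reference on the old one is dropped.  Returns 1 while the
 * next entry is still within the cached block(s), 0 when the caller
 * must read a new block.
 */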
static int fat12_ent_next(struct fat_entry *fatent)
{
	u8 **ent12_p = fatent->u.ent12_p;
	struct buffer_head **bhs = fatent->bhs;
	u8 *nextp = ent12_p[1] + 1 + (fatent->entry & 1);

	fatent->entry++;
	if (fatent->nr_bhs == 1) {
		WARN_ON(ent12_p[0] > (u8 *)(bhs[0]->b_data +
					    (bhs[0]->b_size - 2)));
		WARN_ON(ent12_p[1] > (u8 *)(bhs[0]->b_data +
					    (bhs[0]->b_size - 1)));
		if (nextp < (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1))) {
			ent12_p[0] = nextp - 1;
			ent12_p[1] = nextp;
			return 1;
		}
	} else {
		WARN_ON(ent12_p[0] != (u8 *)(bhs[0]->b_data +
					     (bhs[0]->b_size - 1)));
		WARN_ON(ent12_p[1] != (u8 *)bhs[1]->b_data);
		ent12_p[0] = nextp - 1;
		ent12_p[1] = nextp;
		brelse(bhs[0]);
		bhs[0] = bhs[1];
		fatent->nr_bhs = 1;
		return 1;
	}
	ent12_p[0] = NULL;
	ent12_p[1] = NULL;
	return 0;
}

static int fat16_ent_next(struct fat_entry *fatent)
{
	const struct buffer_head *bh = fatent->bhs[0];
	fatent->entry++;
	if (fatent->u.ent16_p < (__le16 *)(bh->b_data + (bh->b_size - 2))) {
		fatent->u.ent16_p++;
		return 1;
	}
	fatent->u.ent16_p = NULL;
	return 0;
}

static int fat32_ent_next(struct fat_entry *fatent)
{
	const struct buffer_head *bh = fatent->bhs[0];
	fatent->entry++;
	if (fatent->u.ent32_p < (__le32 *)(bh->b_data + (bh->b_size - 4))) {
		fatent->u.ent32_p++;
		return 1;
	}
	fatent->u.ent32_p = NULL;
	return 0;
}

static const struct fatent_operations fat12_ops = {
	.ent_blocknr	= fat12_ent_blocknr,
	.ent_set_ptr	= fat12_ent_set_ptr,
	.ent_bread	= fat12_ent_bread,
	.ent_get	= fat12_ent_get,
	.ent_put	= fat12_ent_put,
	.ent_next	= fat12_ent_next,
};

static const struct fatent_operations fat16_ops = {
	.ent_blocknr	= fat_ent_blocknr,
	.ent_set_ptr	= fat16_ent_set_ptr,
	.ent_bread	= fat_ent_bread,
	.ent_get	= fat16_ent_get,
	.ent_put	= fat16_ent_put,
	.ent_next	= fat16_ent_next,
};

static const struct fatent_operations fat32_ops = {
	.ent_blocknr	= fat_ent_blocknr,
	.ent_set_ptr	= fat32_ent_set_ptr,
	.ent_bread	= fat_ent_bread,
	.ent_get	= fat32_ent_get,
	.ent_put	= fat32_ent_put,
	.ent_next	= fat32_ent_next,
};

static inline void lock_fat(struct msdos_sb_info *sbi)
{
	mutex_lock(&sbi->fat_lock);
}

static inline void unlock_fat(struct msdos_sb_info *sbi)
{
	mutex_unlock(&sbi->fat_lock);
}

void fat_ent_access_init(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	mutex_init(&sbi->fat_lock);

	if (is_fat32(sbi)) {
		sbi->fatent_shift = 2;
		sbi->fatent_ops = &fat32_ops;
	} else if (is_fat16(sbi)) {
		sbi->fatent_shift = 1;
		sbi->fatent_ops = &fat16_ops;
	} else if (is_fat12(sbi)) {
		sbi->fatent_shift = -1;
		sbi->fatent_ops = &fat12_ops;
	} else {
		fat_fs_error(sb, "invalid FAT variant, %u bits", sbi->fat_bits);
	}
}

static void mark_fsinfo_dirty(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	if (sb_rdonly(sb) || !is_fat32(sbi))
		return;

	__mark_inode_dirty(sbi->fsinfo_inode, I_DIRTY_SYNC);
}

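/*
 * Try to reuse the buffer_head(s) already cached in *fatent for the
 * given block/offset instead of re-reading them.  Returns 1 if the
 * cached blocks cover the entry and the pointer was updated, 0 if the
 * caller has to do a fresh ent_bread().
 */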
static inline int fat_ent_update_ptr(struct super_block *sb,
				     struct fat_entry *fatent,
				     int offset, sector_t blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct buffer_head **bhs = fatent->bhs;

	/* Do this fatent's blocks include this entry? */
	if (!fatent->nr_bhs || bhs[0]->b_blocknr != blocknr)
		return 0;
	if (is_fat12(sbi)) {
		if ((offset + 1) < sb->s_blocksize) {
			/* This entry is on bhs[0]. */
			if (fatent->nr_bhs == 2) {
				brelse(bhs[1]);
				fatent->nr_bhs = 1;
			}
		} else {
			/* This entry needs the next block. */
			if (fatent->nr_bhs != 2)
				return 0;
			if (bhs[1]->b_blocknr != (blocknr + 1))
				return 0;
		}
	}
	ops->ent_set_ptr(fatent, offset);
	return 1;
}

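/*
 * Read the FAT entry for @entry.  Returns the value stored there (the
 * next cluster in the chain, FAT_ENT_FREE or FAT_ENT_EOF) or a negative
 * errno on I/O failure or invalid entry.
 */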
int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	int err, offset;
	sector_t blocknr;

	if (!fat_valid_entry(sbi, entry)) {
		fatent_brelse(fatent);
		fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
		return -EIO;
	}

	fatent_set_entry(fatent, entry);
	ops->ent_blocknr(sb, entry, &offset, &blocknr);

	if (!fat_ent_update_ptr(sb, fatent, offset, blocknr)) {
		fatent_brelse(fatent);
		err = ops->ent_bread(sb, fatent, offset, blocknr);
		if (err)
			return err;
	}
	return ops->ent_get(fatent);
}

/* FIXME: We can write the blocks in bigger chunks. */
static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
			  int nr_bhs)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct buffer_head *c_bh;
	int err, n, copy;

	err = 0;
	for (copy = 1; copy < sbi->fats; copy++) {
		sector_t backup_fat = sbi->fat_length * copy;

		for (n = 0; n < nr_bhs; n++) {
			c_bh = sb_getblk(sb, backup_fat + bhs[n]->b_blocknr);
			if (!c_bh) {
				err = -ENOMEM;
				goto error;
			}
			/* Avoid race with userspace read via bdev */
			lock_buffer(c_bh);
			memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
			set_buffer_uptodate(c_bh);
			unlock_buffer(c_bh);
			mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
			if (sb->s_flags & SB_SYNCHRONOUS)
				err = sync_dirty_buffer(c_bh);
			brelse(c_bh);
			if (err)
				goto error;
		}
	}
error:
	return err;
}

int fat_ent_write(struct inode *inode, struct fat_entry *fatent,
		  int new, int wait)
{
	struct super_block *sb = inode->i_sb;
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	int err;

	ops->ent_put(fatent, new);
	if (wait) {
		err = fat_sync_bhs(fatent->bhs, fatent->nr_bhs);
		if (err)
			return err;
	}
	return fat_mirror_bhs(sb, fatent->bhs, fatent->nr_bhs);
}

static inline int fat_ent_next(struct msdos_sb_info *sbi,
			       struct fat_entry *fatent)
{
	if (sbi->fatent_ops->ent_next(fatent)) {
		if (fatent->entry < sbi->max_cluster)
			return 1;
	}
	return 0;
}

static inline int fat_ent_read_block(struct super_block *sb,
				     struct fat_entry *fatent)
{
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	sector_t blocknr;
	int offset;

	fatent_brelse(fatent);
	ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
	return ops->ent_bread(sb, fatent, offset, blocknr);
}

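/*
 * Add fatent's buffer_heads to the bhs[] array for a later batched
 * sync/mirror, taking an extra reference on each and skipping ones
 * that are already in the array.
 */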
static void fat_collect_bhs(struct buffer_head **bhs, int *nr_bhs,
			    struct fat_entry *fatent)
{
	int n, i;

	for (n = 0; n < fatent->nr_bhs; n++) {
		for (i = 0; i < *nr_bhs; i++) {
			if (fatent->bhs[n] == bhs[i])
				break;
		}
		if (i == *nr_bhs) {
			get_bh(fatent->bhs[n]);
			bhs[i] = fatent->bhs[n];
			(*nr_bhs)++;
		}
	}
}

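/*
 * Allocate nr_cluster free clusters, chain them together (the last one
 * is terminated with FAT_ENT_EOF) and return their numbers in
 * cluster[].  The search starts just after the last allocation
 * (sbi->prev_free) and wraps around once.  On failure, any clusters
 * already allocated are freed again.
 */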
int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent, prev_ent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, count, err, nr_bhs, idx_clus;

	BUG_ON(nr_cluster > (MAX_BUF_PER_PAGE / 2));	/* fixed limit */

	lock_fat(sbi);
	if (sbi->free_clusters != -1 && sbi->free_clus_valid &&
	    sbi->free_clusters < nr_cluster) {
		unlock_fat(sbi);
		return -ENOSPC;
	}

	err = nr_bhs = idx_clus = 0;
	count = FAT_START_ENT;
	fatent_init(&prev_ent);
	fatent_init(&fatent);
	fatent_set_entry(&fatent, sbi->prev_free + 1);
	while (count < sbi->max_cluster) {
		if (fatent.entry >= sbi->max_cluster)
			fatent.entry = FAT_START_ENT;
		fatent_set_entry(&fatent, fatent.entry);
		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;

		/* Find the free entries in a block */
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
				int entry = fatent.entry;

				/* make the cluster chain */
				ops->ent_put(&fatent, FAT_ENT_EOF);
				if (prev_ent.nr_bhs)
					ops->ent_put(&prev_ent, entry);

				fat_collect_bhs(bhs, &nr_bhs, &fatent);

				sbi->prev_free = entry;
				if (sbi->free_clusters != -1)
					sbi->free_clusters--;

				cluster[idx_clus] = entry;
				idx_clus++;
				if (idx_clus == nr_cluster)
					goto out;

				/*
				 * fat_collect_bhs() takes a reference on the
				 * bhs, so prev_ent stays usable after this.
				 */
				prev_ent = fatent;
			}
			count++;
			if (count == sbi->max_cluster)
				break;
		} while (fat_ent_next(sbi, &fatent));
	}

	/* Couldn't allocate the free entries */
	sbi->free_clusters = 0;
	sbi->free_clus_valid = 1;
	err = -ENOSPC;

out:
	unlock_fat(sbi);
	mark_fsinfo_dirty(sb);
	fatent_brelse(&fatent);
	if (!err) {
		if (inode_needs_sync(inode))
			err = fat_sync_bhs(bhs, nr_bhs);
		if (!err)
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
	}
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);

	if (err && idx_clus)
		fat_free_clusters(inode, cluster[0]);

	return err;
}

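/*
 * Walk the chain starting at @cluster, marking each entry FAT_ENT_FREE
 * until FAT_ENT_EOF is reached.  Dirty buffer_heads are batched and
 * mirrored to the backup FAT(s) at most MAX_BUF_PER_PAGE at a time.
 */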
int fat_free_clusters(struct inode *inode, int cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, err, nr_bhs;
	int first_cl = cluster, dirty_fsinfo = 0;

	nr_bhs = 0;
	fatent_init(&fatent);
	lock_fat(sbi);
	do {
		cluster = fat_ent_read(inode, &fatent, cluster);
		if (cluster < 0) {
			err = cluster;
			goto error;
		} else if (cluster == FAT_ENT_FREE) {
			fat_fs_error(sb, "%s: deleting FAT entry beyond EOF",
				     __func__);
			err = -EIO;
			goto error;
		}

		if (sbi->options.discard) {
			/*
			 * Issue discard for the sectors we no longer
			 * care about, batching contiguous clusters
			 * into one request
			 */
			if (cluster != fatent.entry + 1) {
				int nr_clus = fatent.entry - first_cl + 1;

				sb_issue_discard(sb,
					fat_clus_to_blknr(sbi, first_cl),
					nr_clus * sbi->sec_per_clus,
					GFP_NOFS, 0);

				first_cl = cluster;
			}
		}

		ops->ent_put(&fatent, FAT_ENT_FREE);
		if (sbi->free_clusters != -1) {
			sbi->free_clusters++;
			dirty_fsinfo = 1;
		}

		if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
			if (sb->s_flags & SB_SYNCHRONOUS) {
				err = fat_sync_bhs(bhs, nr_bhs);
				if (err)
					goto error;
			}
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
			if (err)
				goto error;
			for (i = 0; i < nr_bhs; i++)
				brelse(bhs[i]);
			nr_bhs = 0;
		}
		fat_collect_bhs(bhs, &nr_bhs, &fatent);
	} while (cluster != FAT_ENT_EOF);

	if (sb->s_flags & SB_SYNCHRONOUS) {
		err = fat_sync_bhs(bhs, nr_bhs);
		if (err)
			goto error;
	}
	err = fat_mirror_bhs(sb, bhs, nr_bhs);
error:
	fatent_brelse(&fatent);
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);
	unlock_fat(sbi);
	if (dirty_fsinfo)
		mark_fsinfo_dirty(sb);

	return err;
}
EXPORT_SYMBOL_GPL(fat_free_clusters);

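/*
 * State for readahead over the FAT area: cur/limit track the block
 * range being scanned, while ra_next/ra_limit describe the window of
 * blocks already submitted for readahead, which is advanced by
 * ra_blocks once cur catches up with ra_advance.
 */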
struct fatent_ra {
	sector_t cur;
	sector_t limit;

	unsigned int ra_blocks;
	sector_t ra_advance;
	sector_t ra_next;
	sector_t ra_limit;
};

static void fat_ra_init(struct super_block *sb, struct fatent_ra *ra,
			struct fat_entry *fatent, int ent_limit)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	sector_t blocknr, block_end;
	int offset;
	/*
	 * This is a sequential read, so use ra_pages * 2 (but try to
	 * align with the optimal hardware I/O size).
	 * [BTW, 128kb covers the whole sectors for FAT12 and FAT16]
	 */
	unsigned long ra_pages = sb->s_bdi->ra_pages;
	unsigned int reada_blocks;

	if (fatent->entry >= ent_limit)
		return;

	if (ra_pages > sb->s_bdi->io_pages)
		ra_pages = rounddown(ra_pages, sb->s_bdi->io_pages);
	reada_blocks = ra_pages << (PAGE_SHIFT - sb->s_blocksize_bits + 1);

	/* Initialize the range for sequential read */
	ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
	ops->ent_blocknr(sb, ent_limit - 1, &offset, &block_end);
	ra->cur = 0;
	ra->limit = (block_end + 1) - blocknr;

	/* Advance the window by half its size */
	ra->ra_blocks = reada_blocks >> 1;
	ra->ra_advance = ra->cur;
	ra->ra_next = ra->cur;
	ra->ra_limit = ra->cur + min_t(sector_t, reada_blocks, ra->limit);
}

/* Assumed to be called before reading a new block (increments ->cur). */
static void fat_ent_reada(struct super_block *sb, struct fatent_ra *ra,
			  struct fat_entry *fatent)
{
	if (ra->ra_next >= ra->ra_limit)
		return;

	if (ra->cur >= ra->ra_advance) {
		struct msdos_sb_info *sbi = MSDOS_SB(sb);
		const struct fatent_operations *ops = sbi->fatent_ops;
		struct blk_plug plug;
		sector_t blocknr, diff;
		int offset;

		ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);

		diff = blocknr - ra->cur;
		blk_start_plug(&plug);
		/*
		 * FIXME: we would want to directly use the bio with
		 * pages to reduce the number of segments.
		 */
		for (; ra->ra_next < ra->ra_limit; ra->ra_next++)
			sb_breadahead(sb, ra->ra_next + diff);
		blk_finish_plug(&plug);

		/* Advance the readahead window */
		ra->ra_advance += ra->ra_blocks;
		ra->ra_limit += min_t(sector_t,
				      ra->ra_blocks, ra->limit - ra->ra_limit);
	}
	ra->cur++;
}

int fat_count_free_clusters(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct fatent_ra fatent_ra;
	int err = 0, free;

	lock_fat(sbi);
	if (sbi->free_clusters != -1 && sbi->free_clus_valid)
		goto out;

	free = 0;
	fatent_init(&fatent);
	fatent_set_entry(&fatent, FAT_START_ENT);
	fat_ra_init(sb, &fatent_ra, &fatent, sbi->max_cluster);
	while (fatent.entry < sbi->max_cluster) {
		/* readahead of fat blocks */
		fat_ent_reada(sb, &fatent_ra, &fatent);

		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;

		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE)
				free++;
		} while (fat_ent_next(sbi, &fatent));
		cond_resched();
	}
	sbi->free_clusters = free;
	sbi->free_clus_valid = 1;
	mark_fsinfo_dirty(sb);
	fatent_brelse(&fatent);
out:
	unlock_fat(sbi);
	return err;
}

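/* Discard the sectors backing a run of nr_clus clusters starting at clus. */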
static int fat_trim_clusters(struct super_block *sb, u32 clus, u32 nr_clus)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	return sb_issue_discard(sb, fat_clus_to_blknr(sbi, clus),
				nr_clus * sbi->sec_per_clus, GFP_NOFS, 0);
}

int fat_trim_fs(struct inode *inode, struct fstrim_range *range)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct fatent_ra fatent_ra;
	u64 ent_start, ent_end, minlen, trimmed = 0;
	u32 free = 0;
	int err = 0;

	/*
	 * FAT data is organized as clusters, so trim at the granularity
	 * of clusters.
	 *
	 * fstrim_range is in bytes; convert the values to cluster indexes.
	 * Treat sectors before the data region as all used, so as not to
	 * trim them.
	 */
	ent_start = max_t(u64, range->start>>sbi->cluster_bits, FAT_START_ENT);
	ent_end = ent_start + (range->len >> sbi->cluster_bits) - 1;
	minlen = range->minlen >> sbi->cluster_bits;

	if (ent_start >= sbi->max_cluster || range->len < sbi->cluster_size)
		return -EINVAL;
	if (ent_end >= sbi->max_cluster)
		ent_end = sbi->max_cluster - 1;

	fatent_init(&fatent);
	lock_fat(sbi);
	fatent_set_entry(&fatent, ent_start);
	fat_ra_init(sb, &fatent_ra, &fatent, ent_end + 1);
	while (fatent.entry <= ent_end) {
		/* readahead of fat blocks */
		fat_ent_reada(sb, &fatent_ra, &fatent);

		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto error;
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
				free++;
			} else if (free) {
				if (free >= minlen) {
					u32 clus = fatent.entry - free;

					err = fat_trim_clusters(sb, clus, free);
					if (err && err != -EOPNOTSUPP)
						goto error;
					if (!err)
						trimmed += free;
					err = 0;
				}
				free = 0;
			}
		} while (fat_ent_next(sbi, &fatent) && fatent.entry <= ent_end);

		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto error;
		}

		if (need_resched()) {
			fatent_brelse(&fatent);
			unlock_fat(sbi);
			cond_resched();
			lock_fat(sbi);
		}
	}
	/* Handle the case where the tail entries are all free */
	if (free && free >= minlen) {
		u32 clus = fatent.entry - free;

		err = fat_trim_clusters(sb, clus, free);
		if (err && err != -EOPNOTSUPP)
			goto error;
		if (!err)
			trimmed += free;
		err = 0;
	}

error:
	fatent_brelse(&fatent);
	unlock_fat(sbi);

	range->len = trimmed << sbi->cluster_bits;

	return err;
}