// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 * Copyright (C) 2016 Song Liu <songliubraving@fb.com>
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include "md.h"
#include "raid5.h"
#include "md-bitmap.h"
#include "raid5-log.h"

/*
 * metadata/data are stored on disk in 4k-sized units (blocks) regardless of
 * the underlying hardware sector size. This only works with PAGE_SIZE == 4096.
 */
#define BLOCK_SECTORS (8)
#define BLOCK_SECTOR_SHIFT (3)

/*
 * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
 *
 * In write-through mode, reclaim runs once every log->max_free_space
 * sectors. This keeps recovery scans from taking too long.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
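/*
 * Note: RECLAIM_MAX_FREE_SPACE is in 512-byte sectors
 * (10 * 1024 * 1024 * 2 sectors == 10GiB), and the SHIFT of 2 gives the
 * 1/4-of-device-size bound mentioned in the comment above.
 */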

/* wake up reclaim thread periodically */
#define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
/* start flush with these full stripes */
#define R5C_FULL_STRIPE_FLUSH_BATCH(conf) (conf->max_nr_stripes / 4)
/* reclaim stripes in groups */
#define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)

/*
 * We only need 2 bios per I/O unit to make progress, but ensure we
 * have a few more available so we don't run too tight.
 */
#define R5L_POOL_SIZE 4

static char *r5c_journal_mode_str[] = {"write-through",
				       "write-back"};
/*
 * raid5 cache state machine
 *
 * With the RAID cache, each stripe works in two phases:
 *	- caching phase
 *	- writing-out phase
 *
 * These two phases are controlled by bit STRIPE_R5C_CACHING:
 *   if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase
 *   if STRIPE_R5C_CACHING == 1, the stripe is in caching phase
 *
 * When there is no journal, or the journal is in write-through mode,
 * the stripe is always in writing-out phase.
 *
 * For write-back journal, the stripe is sent to caching phase on write
 * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off
 * the write-out phase by clearing STRIPE_R5C_CACHING.
 *
 * Stripes in caching phase do not write the raid disks. Instead, all
 * writes are committed from the log device. Therefore, a stripe in
 * caching phase handles writes as:
 *	- write to log device
 *	- return IO
 *
 * Stripes in writing-out phase handle writes as:
 *	- calculate parity
 *	- write pending data and parity to journal
 *	- write data and parity to raid disks
 *	- return IO for pending writes
 */

struct r5l_log {
	struct md_rdev *rdev;

	u32 uuid_checksum;

	sector_t device_size;		/* log device size, round to
					 * BLOCK_SECTORS */
	sector_t max_free_space;	/* reclaim run if free space is at
					 * this size */

	sector_t last_checkpoint;	/* log tail. where recovery scan
					 * starts from */
	u64 last_cp_seq;		/* log tail sequence */

	sector_t log_start;		/* log head. where new data appends */
	u64 seq;			/* log head sequence */

	sector_t next_checkpoint;

	struct mutex io_mutex;
	struct r5l_io_unit *current_io;	/* current io_unit accepting new data */

	spinlock_t io_list_lock;
	struct list_head running_ios;	/* io_units which are still running,
					 * and have not yet been completely
					 * written to the log */
	struct list_head io_end_ios;	/* io_units which have been completely
					 * written to the log but not yet written
					 * to the RAID */
	struct list_head flushing_ios;	/* io_units which are waiting for log
					 * cache flush */
	struct list_head finished_ios;	/* io_units which settle down in log disk */
	struct bio flush_bio;

	struct list_head no_mem_stripes;   /* pending stripes, -ENOMEM */

	struct kmem_cache *io_kc;
	mempool_t io_pool;
	struct bio_set bs;
	mempool_t meta_pool;

	struct md_thread __rcu *reclaim_thread;
	unsigned long reclaim_target;	/* amount of space that needs to be
					 * reclaimed. if it's 0, reclaim the
					 * space used by io_units which are in
					 * IO_UNIT_STRIPE_END state (i.e.
					 * reclaim doesn't wait for a specific
					 * io_unit to switch to
					 * IO_UNIT_STRIPE_END state) */
	wait_queue_head_t iounit_wait;

	struct list_head no_space_stripes; /* pending stripes, log has no space */
	spinlock_t no_space_stripes_lock;

	bool need_cache_flush;

	/* for r5c_cache */
	enum r5c_journal_mode r5c_journal_mode;

	/* all stripes in r5cache, in the order of seq at sh->log_start */
	struct list_head stripe_in_journal_list;

	spinlock_t stripe_in_journal_lock;
	atomic_t stripe_in_journal_count;

	/* to submit async io_units, to fulfill ordering of flush */
	struct work_struct deferred_io_work;
	/* to disable write back while in degraded mode */
	struct work_struct disable_writeback_work;

	/* for chunk_aligned_read in writeback mode, details below */
	spinlock_t tree_lock;
	struct radix_tree_root big_stripe_tree;
};

/*
 * Enable chunk_aligned_read() with write back cache.
 *
 * Each chunk may contain more than one stripe (for example, a 256kB
 * chunk contains 64 4kB pages, so this chunk contains 64 stripes). For
 * chunk_aligned_read, these stripes are grouped into one "big_stripe".
 * For each big_stripe, we count how many stripes of this big_stripe
 * are in the write back cache. This count is tracked in a radix tree
 * (big_stripe_tree). We use the radix_tree item pointer as the counter.
 * r5c_tree_index() is used to calculate keys for the radix tree.
 *
 * chunk_aligned_read() calls r5c_big_stripe_cached() to look up the
 * big_stripe of each chunk in the tree. If this big_stripe is in the
 * tree, chunk_aligned_read() aborts. This lookup is protected by
 * rcu_read_lock().
 *
 * It is necessary to remember whether a stripe is counted in
 * big_stripe_tree. Instead of adding a new flag, we reuse existing flags:
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE. If either of these
 * two flags is set, the stripe is counted in big_stripe_tree. This
 * requires moving set_bit(STRIPE_R5C_PARTIAL_STRIPE) to
 * r5c_try_caching_write(); and moving clear_bit of
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE to
 * r5c_finish_stripe_write_out().
 */

/*
 * The radix tree requires the lowest 2 bits of the data pointer to be 2'b00.
 * So it is necessary to left shift the counter by 2 bits before using it
 * as the data pointer of the tree.
 */
#define R5C_RADIX_COUNT_SHIFT 2
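/*
 * Illustration (not used by the code below): a counter value of 3 would be
 * stored as the item pointer (void *)(3UL << R5C_RADIX_COUNT_SHIFT) and
 * read back as (unsigned long)item >> R5C_RADIX_COUNT_SHIFT.
 */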

/*
 * calculate key for big_stripe_tree
 *
 * sect: align_bi->bi_iter.bi_sector or sh->sector
 */
static inline sector_t r5c_tree_index(struct r5conf *conf,
				      sector_t sect)
{
	sector_div(sect, conf->chunk_sectors);
	return sect;
}
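
/*
 * For example (illustrative only): with 256kB chunks (chunk_sectors == 512),
 * sectors 0..511 map to key 0, sectors 512..1023 map to key 1, and so on.
 */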

/*
 * An IO range starts at a meta data block and ends at the next meta data
 * block. The io_unit's meta data block tracks the data/parity that follows
 * it. An io_unit is written to the log disk with a normal write; as we
 * always flush the log disk first and only then start moving data to the
 * raid disks, there is no need to write the io_unit with FLUSH/FUA.
 */
struct r5l_io_unit {
	struct r5l_log *log;

	struct page *meta_page;	/* store meta block */
	int meta_offset;	/* current offset in meta_page */

	struct bio *current_bio;/* current_bio accepting new data */

	atomic_t pending_stripe;/* how many stripes not flushed to raid */
	u64 seq;		/* seq number of the metablock */
	sector_t log_start;	/* where the io_unit starts */
	sector_t log_end;	/* where the io_unit ends */
	struct list_head log_sibling; /* log->running_ios */
	struct list_head stripe_list; /* stripes added to the io_unit */

	int state;
	bool need_split_bio;
	struct bio *split_bio;

	unsigned int has_flush:1;	/* include flush request */
	unsigned int has_fua:1;		/* include fua request */
	unsigned int has_null_flush:1;	/* include null flush request */
	unsigned int has_flush_payload:1; /* include flush payload */
	/*
	 * the io isn't submitted yet; a flush/fua io_unit can only be
	 * submitted once it is the first IO in the running_ios list
	 */
	unsigned int io_deferred:1;

	struct bio_list flush_barriers;	/* size == 0 flush bios */
};

/* r5l_io_unit state */
enum r5l_io_unit_state {
	IO_UNIT_RUNNING = 0,	/* accepting new IO */
	IO_UNIT_IO_START = 1,	/* io_unit bio started writing to the log,
				 * no longer accepting new bios */
	IO_UNIT_IO_END = 2,	/* io_unit bio finished writing to the log */
	IO_UNIT_STRIPE_END = 3,	/* stripe data finished writing to raid */
};

bool r5c_is_writeback(struct r5l_log *log)
{
	return (log != NULL &&
		log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
}

static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
	start += inc;
	if (start >= log->device_size)
		start = start - log->device_size;
	return start;
}

static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
				  sector_t end)
{
	if (end >= start)
		return end - start;
	else
		return end + log->device_size - start;
}
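
/*
 * For example (illustrative only): with device_size == 100 sectors,
 * r5l_ring_distance(log, 90, 10) == 20, accounting for the ring wrap.
 */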

static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
{
	sector_t used_size;

	used_size = r5l_ring_distance(log, log->last_checkpoint,
				      log->log_start);

	return log->device_size > used_size + size;
}

static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
				    enum r5l_io_unit_state state)
{
	if (WARN_ON(io->state >= state))
		return;
	io->state = state;
}

static void
r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev)
{
	struct bio *wbi, *wbi2;

	wbi = dev->written;
	dev->written = NULL;
	while (wbi && wbi->bi_iter.bi_sector <
	       dev->sector + RAID5_STRIPE_SECTORS(conf)) {
		wbi2 = r5_next_bio(conf, wbi, dev->sector);
		md_write_end(conf->mddev);
		bio_endio(wbi);
		wbi = wbi2;
	}
}

void r5c_handle_cached_data_endio(struct r5conf *conf,
				  struct stripe_head *sh, int disks)
{
	int i;

	for (i = sh->disks; i--; ) {
		if (sh->dev[i].written) {
			set_bit(R5_UPTODATE, &sh->dev[i].flags);
			r5c_return_dev_pending_writes(conf, &sh->dev[i]);
		}
	}
}

void r5l_wake_reclaim(struct r5l_log *log, sector_t space);

/* Check whether we should flush some stripes to free up stripe cache */
void r5c_check_stripe_cache_usage(struct r5conf *conf)
{
	int total_cached;
	struct r5l_log *log = READ_ONCE(conf->log);

	if (!r5c_is_writeback(log))
		return;

	total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
		atomic_read(&conf->r5c_cached_full_stripes);

	/*
	 * The following condition is true for either of the following:
	 *   - stripe cache pressure high:
	 *          total_cached > 3/4 min_nr_stripes ||
	 *          empty_inactive_list_nr > 0
	 *   - stripe cache pressure moderate:
	 *          total_cached > 1/2 min_nr_stripes
	 */
	if (total_cached > conf->min_nr_stripes * 1 / 2 ||
	    atomic_read(&conf->empty_inactive_list_nr) > 0)
		r5l_wake_reclaim(log, 0);
}

/*
 * flush cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full
 * stripes in the cache
 */
void r5c_check_cached_full_stripe(struct r5conf *conf)
{
	struct r5l_log *log = READ_ONCE(conf->log);

	if (!r5c_is_writeback(log))
		return;

	/*
	 * wake up reclaim for R5C_FULL_STRIPE_FLUSH_BATCH cached stripes
	 * or a full stripe (chunk size / 4k stripes).
	 */
	if (atomic_read(&conf->r5c_cached_full_stripes) >=
	    min(R5C_FULL_STRIPE_FLUSH_BATCH(conf),
		conf->chunk_sectors >> RAID5_STRIPE_SHIFT(conf)))
		r5l_wake_reclaim(log, 0);
}

/*
 * Total log space (in sectors) needed to flush all data in cache
 *
 * To avoid deadlock due to log space, it is necessary to reserve log
 * space to flush critical stripes (stripes occupying log space near
 * last_checkpoint). This function helps check how much log space is
 * required to flush all cached stripes.
 *
 * To reduce log space requirements, two mechanisms are used to give cache
 * flushes higher priority:
 *    1. In handle_stripe_dirtying() and schedule_reconstruction(),
 *       stripes ALREADY in journal can be flushed w/o pending writes;
 *    2. In r5l_write_stripe() and r5c_cache_data(), stripes NOT in journal
 *       can be delayed (r5l_add_no_space_stripe).
 *
 * In cache flush, the stripe goes through 1 and then 2. For a stripe that
 * already passed 1, flushing it requires at most (conf->max_degraded + 1)
 * pages of journal space. For stripes that have not passed 1, flushing
 * requires (conf->raid_disks + 1) pages of journal space. There are at
 * most (conf->group_cnt + 1) stripes that passed 1. So total journal space
 * required to flush all cached stripes (in pages) is:
 *
 *    (stripe_in_journal_count - group_cnt - 1) * (max_degraded + 1) +
 *    (group_cnt + 1) * (raid_disks + 1)
 * or
 *    (stripe_in_journal_count) * (max_degraded + 1) +
 *    (group_cnt + 1) * (raid_disks - max_degraded)
 */
static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
{
	struct r5l_log *log = READ_ONCE(conf->log);

	if (!r5c_is_writeback(log))
		return 0;

	return BLOCK_SECTORS *
		((conf->max_degraded + 1) * atomic_read(&log->stripe_in_journal_count) +
		 (conf->raid_disks - conf->max_degraded) * (conf->group_cnt + 1));
}
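
/*
 * Worked example (illustrative only): for a 5-disk RAID5 array
 * (raid_disks == 5, max_degraded == 1, group_cnt == 0) with 10 stripes
 * in the journal, the space required above is
 * BLOCK_SECTORS * (2 * 10 + 4 * 1) = 8 * 24 = 192 sectors.
 */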

/*
 * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
 *
 * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
 * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
 * device is less than 2x of reclaim_required_space.
 */
static inline void r5c_update_log_state(struct r5l_log *log)
{
	struct r5conf *conf = log->rdev->mddev->private;
	sector_t free_space;
	sector_t reclaim_space;
	bool wake_reclaim = false;

	if (!r5c_is_writeback(log))
		return;

	free_space = r5l_ring_distance(log, log->log_start,
				       log->last_checkpoint);
	reclaim_space = r5c_log_required_to_flush_cache(conf);
	if (free_space < 2 * reclaim_space)
		set_bit(R5C_LOG_CRITICAL, &conf->cache_state);
	else {
		if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
			wake_reclaim = true;
		clear_bit(R5C_LOG_CRITICAL, &conf->cache_state);
	}
	if (free_space < 3 * reclaim_space)
		set_bit(R5C_LOG_TIGHT, &conf->cache_state);
	else
		clear_bit(R5C_LOG_TIGHT, &conf->cache_state);

	if (wake_reclaim)
		r5l_wake_reclaim(log, 0);
}

/*
 * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING.
 * This function should only be called in write-back mode.
 */
void r5c_make_stripe_write_out(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	struct r5l_log *log = READ_ONCE(conf->log);

	BUG_ON(!r5c_is_writeback(log));

	WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
	clear_bit(STRIPE_R5C_CACHING, &sh->state);

	if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
		atomic_inc(&conf->preread_active_stripes);
}

static void r5c_handle_data_cached(struct stripe_head *sh)
{
	int i;

	for (i = sh->disks; i--; )
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
			set_bit(R5_InJournal, &sh->dev[i].flags);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
		}
	clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
}

/*
 * this journal write must contain full parity,
 * it may also contain some data pages
 */
static void r5c_handle_parity_cached(struct stripe_head *sh)
{
	int i;

	for (i = sh->disks; i--; )
		if (test_bit(R5_InJournal, &sh->dev[i].flags))
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
}

/*
 * Setting proper flags after writing (or flushing) data and/or parity to the
 * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
 */
static void r5c_finish_cache_stripe(struct stripe_head *sh)
{
	struct r5l_log *log = READ_ONCE(sh->raid_conf->log);

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
		/*
		 * Set R5_InJournal for parity dev[pd_idx]. This means
		 * all data AND parity in the journal. For RAID 6, it is
		 * NOT necessary to set the flag for dev[qd_idx], as the
		 * two parities are written out together.
		 */
		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
	} else if (test_bit(STRIPE_R5C_CACHING, &sh->state)) {
		r5c_handle_data_cached(sh);
	} else {
		r5c_handle_parity_cached(sh);
		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
	}
}

static void r5l_io_run_stripes(struct r5l_io_unit *io)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
		list_del_init(&sh->log_list);

		r5c_finish_cache_stripe(sh);

		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}

static void r5l_log_run_stripes(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	lockdep_assert_held(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;

		list_move_tail(&io->log_sibling, &log->finished_ios);
		r5l_io_run_stripes(io);
	}
}

static void r5l_move_to_end_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	lockdep_assert_held(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;
		list_move_tail(&io->log_sibling, &log->io_end_ios);
	}
}
5553848c0bcSChristoph Hellwig
5563bddb7f8SSong Liu static void __r5l_stripe_write_finished(struct r5l_io_unit *io);
r5l_log_endio(struct bio * bio)557f6bed0efSShaohua Li static void r5l_log_endio(struct bio *bio)
558f6bed0efSShaohua Li {
559f6bed0efSShaohua Li struct r5l_io_unit *io = bio->bi_private;
5603bddb7f8SSong Liu struct r5l_io_unit *io_deferred;
561f6bed0efSShaohua Li struct r5l_log *log = io->log;
562509ffec7SChristoph Hellwig unsigned long flags;
563a9501d74SSong Liu bool has_null_flush;
564a9501d74SSong Liu bool has_flush_payload;
565f6bed0efSShaohua Li
5664e4cbee9SChristoph Hellwig if (bio->bi_status)
5676e74a9cfSShaohua Li md_error(log->rdev->mddev, log->rdev);
5686e74a9cfSShaohua Li
569f6bed0efSShaohua Li bio_put(bio);
570afeee514SKent Overstreet mempool_free(io->meta_page, &log->meta_pool);
571f6bed0efSShaohua Li
572509ffec7SChristoph Hellwig spin_lock_irqsave(&log->io_list_lock, flags);
573509ffec7SChristoph Hellwig __r5l_set_io_unit_state(io, IO_UNIT_IO_END);
574a9501d74SSong Liu
575a9501d74SSong Liu /*
576a9501d74SSong Liu * if the io doesn't not have null_flush or flush payload,
577a9501d74SSong Liu * it is not safe to access it after releasing io_list_lock.
578a9501d74SSong Liu * Therefore, it is necessary to check the condition with
579a9501d74SSong Liu * the lock held.
580a9501d74SSong Liu */
581a9501d74SSong Liu has_null_flush = io->has_null_flush;
582a9501d74SSong Liu has_flush_payload = io->has_flush_payload;
583a9501d74SSong Liu
584ea17481fSSong Liu if (log->need_cache_flush && !list_empty(&io->stripe_list))
5853848c0bcSChristoph Hellwig r5l_move_to_end_ios(log);
58656fef7c6SChristoph Hellwig else
58756fef7c6SChristoph Hellwig r5l_log_run_stripes(log);
5883bddb7f8SSong Liu if (!list_empty(&log->running_ios)) {
5893bddb7f8SSong Liu /*
5903bddb7f8SSong Liu * FLUSH/FUA io_unit is deferred because of ordering, now we
5913bddb7f8SSong Liu * can dispatch it
5923bddb7f8SSong Liu */
5933bddb7f8SSong Liu io_deferred = list_first_entry(&log->running_ios,
5943bddb7f8SSong Liu struct r5l_io_unit, log_sibling);
5953bddb7f8SSong Liu if (io_deferred->io_deferred)
5963bddb7f8SSong Liu schedule_work(&log->deferred_io_work);
5973bddb7f8SSong Liu }
5983bddb7f8SSong Liu
599509ffec7SChristoph Hellwig spin_unlock_irqrestore(&log->io_list_lock, flags);
600509ffec7SChristoph Hellwig
60156fef7c6SChristoph Hellwig if (log->need_cache_flush)
602f6bed0efSShaohua Li md_wakeup_thread(log->rdev->mddev->thread);
6033bddb7f8SSong Liu
604a9501d74SSong Liu /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
605a9501d74SSong Liu if (has_null_flush) {
6063bddb7f8SSong Liu struct bio *bi;
6073bddb7f8SSong Liu
6083bddb7f8SSong Liu WARN_ON(bio_list_empty(&io->flush_barriers));
6093bddb7f8SSong Liu while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
6103bddb7f8SSong Liu bio_endio(bi);
611a9501d74SSong Liu if (atomic_dec_and_test(&io->pending_stripe)) {
612a9501d74SSong Liu __r5l_stripe_write_finished(io);
613a9501d74SSong Liu return;
6143bddb7f8SSong Liu }
615ea17481fSSong Liu }
616a9501d74SSong Liu }
617a9501d74SSong Liu /* decrease pending_stripe for flush payload */
618a9501d74SSong Liu if (has_flush_payload)
619a9501d74SSong Liu if (atomic_dec_and_test(&io->pending_stripe))
6203bddb7f8SSong Liu __r5l_stripe_write_finished(io);
6213bddb7f8SSong Liu }

static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
{
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	/*
	 * In case of journal device failures, submit_bio will get an error
	 * and call endio, and the active stripes will continue the write
	 * process. Therefore, it is not necessary to check the Faulty bit
	 * of the journal device here.
	 *
	 * We can't check split_bio after current_bio is submitted. If
	 * io->split_bio is null, after current_bio is submitted, current_bio
	 * might already be completed and the io_unit is freed. We submit
	 * split_bio first to avoid the issue.
	 */
	if (io->split_bio) {
		if (io->has_flush)
			io->split_bio->bi_opf |= REQ_PREFLUSH;
		if (io->has_fua)
			io->split_bio->bi_opf |= REQ_FUA;
		submit_bio(io->split_bio);
	}

	if (io->has_flush)
		io->current_bio->bi_opf |= REQ_PREFLUSH;
	if (io->has_fua)
		io->current_bio->bi_opf |= REQ_FUA;
	submit_bio(io->current_bio);
}

/* deferred io_unit will be dispatched here */
static void r5l_submit_io_async(struct work_struct *work)
{
	struct r5l_log *log = container_of(work, struct r5l_log,
					   deferred_io_work);
	struct r5l_io_unit *io = NULL;
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	if (!list_empty(&log->running_ios)) {
		io = list_first_entry(&log->running_ios, struct r5l_io_unit,
				      log_sibling);
		if (!io->io_deferred)
			io = NULL;
		else
			io->io_deferred = 0;
	}
	spin_unlock_irqrestore(&log->io_list_lock, flags);
	if (io)
		r5l_do_submit_io(log, io);
}

static void r5c_disable_writeback_async(struct work_struct *work)
{
	struct r5l_log *log = container_of(work, struct r5l_log,
					   disable_writeback_work);
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	int locked = 0;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return;
	pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
		mdname(mddev));

	/* wait superblock change before suspend */
	wait_event(mddev->sb_wait,
		   !READ_ONCE(conf->log) ||
		   (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) &&
		    (locked = mddev_trylock(mddev))));
	if (locked) {
		mddev_suspend(mddev);
		log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
		mddev_resume(mddev);
		mddev_unlock(mddev);
	}
}

static void r5l_submit_current_io(struct r5l_log *log)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_meta_block *block;
	unsigned long flags;
	u32 crc;
	bool do_submit = true;

	if (!io)
		return;

	block = page_address(io->meta_page);
	block->meta_size = cpu_to_le32(io->meta_offset);
	crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
	block->checksum = cpu_to_le32(crc);

	log->current_io = NULL;
	spin_lock_irqsave(&log->io_list_lock, flags);
	if (io->has_flush || io->has_fua) {
		if (io != list_first_entry(&log->running_ios,
					   struct r5l_io_unit, log_sibling)) {
			io->io_deferred = 1;
			do_submit = false;
		}
	}
	spin_unlock_irqrestore(&log->io_list_lock, flags);
	if (do_submit)
		r5l_do_submit_io(log, io);
}

static struct bio *r5l_bio_alloc(struct r5l_log *log)
{
	struct bio *bio = bio_alloc_bioset(log->rdev->bdev, BIO_MAX_VECS,
					   REQ_OP_WRITE, GFP_NOIO, &log->bs);

	bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;

	return bio;
}

static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
{
	log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);

	r5c_update_log_state(log);
	/*
	 * If we filled up the log device, start from the beginning again,
	 * which will require a new bio.
	 *
	 * Note: for this to work properly the log size needs to be a multiple
	 * of BLOCK_SECTORS.
	 */
	if (log->log_start == 0)
		io->need_split_bio = true;

	io->log_end = log->log_start;
}

static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
{
	struct r5l_io_unit *io;
	struct r5l_meta_block *block;

	io = mempool_alloc(&log->io_pool, GFP_ATOMIC);
	if (!io)
		return NULL;
	memset(io, 0, sizeof(*io));

	io->log = log;
	INIT_LIST_HEAD(&io->log_sibling);
	INIT_LIST_HEAD(&io->stripe_list);
	bio_list_init(&io->flush_barriers);
	io->state = IO_UNIT_RUNNING;

	io->meta_page = mempool_alloc(&log->meta_pool, GFP_NOIO);
	block = page_address(io->meta_page);
	clear_page(block);
	block->magic = cpu_to_le32(R5LOG_MAGIC);
	block->version = R5LOG_VERSION;
	block->seq = cpu_to_le64(log->seq);
	block->position = cpu_to_le64(log->log_start);

	io->log_start = log->log_start;
	io->meta_offset = sizeof(struct r5l_meta_block);
	io->seq = log->seq++;

	io->current_bio = r5l_bio_alloc(log);
	io->current_bio->bi_end_io = r5l_log_endio;
	io->current_bio->bi_private = io;
	__bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);

	r5_reserve_log_entry(log, io);

	spin_lock_irq(&log->io_list_lock);
	list_add_tail(&io->log_sibling, &log->running_ios);
	spin_unlock_irq(&log->io_list_lock);

	return io;
}

static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
{
	if (log->current_io &&
	    log->current_io->meta_offset + payload_size > PAGE_SIZE)
		r5l_submit_current_io(log);

	if (!log->current_io) {
		log->current_io = r5l_new_meta(log);
		if (!log->current_io)
			return -ENOMEM;
	}

	return 0;
}

static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
				    sector_t location,
				    u32 checksum1, u32 checksum2,
				    bool checksum2_valid)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_payload_data_parity *payload;

	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(type);
	payload->header.flags = cpu_to_le16(0);
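	/*
	 * payload->size is in 512-byte sectors: one page (8 sectors) when a
	 * single checksum is used, two pages (16 sectors) when checksum2_valid
	 * is set (P and Q parity written together).
	 */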
	payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
				    (PAGE_SHIFT - 9));
	payload->location = cpu_to_le64(location);
	payload->checksum[0] = cpu_to_le32(checksum1);
	if (checksum2_valid)
		payload->checksum[1] = cpu_to_le32(checksum2);

	io->meta_offset += sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * (1 + !!checksum2_valid);
}

static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
{
	struct r5l_io_unit *io = log->current_io;

	if (io->need_split_bio) {
		BUG_ON(io->split_bio);
		io->split_bio = io->current_bio;
		io->current_bio = r5l_bio_alloc(log);
		bio_chain(io->current_bio, io->split_bio);
		io->need_split_bio = false;
	}

	if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
		BUG();

	r5_reserve_log_entry(log, io);
}

static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_io_unit *io;
	struct r5l_payload_flush *payload;
	int meta_size;

	/*
	 * payload_flush requires extra writes to the journal.
	 * To avoid handling the extra IO in quiesce, just skip
	 * flush_payload
	 */
	if (conf->quiesce)
		return;

	mutex_lock(&log->io_mutex);
	meta_size = sizeof(struct r5l_payload_flush) + sizeof(__le64);

	if (r5l_get_meta(log, meta_size)) {
		mutex_unlock(&log->io_mutex);
		return;
	}

	/* current implementation is one stripe per flush payload */
	io = log->current_io;
	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(R5LOG_PAYLOAD_FLUSH);
	payload->header.flags = cpu_to_le16(0);
	payload->size = cpu_to_le32(sizeof(__le64));
	payload->flush_stripes[0] = cpu_to_le64(sect);
	io->meta_offset += meta_size;
	/* multiple flush payloads count as one pending_stripe */
	if (!io->has_flush_payload) {
		io->has_flush_payload = 1;
		atomic_inc(&io->pending_stripe);
	}
	mutex_unlock(&log->io_mutex);
}

static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
			  int data_pages, int parity_pages)
{
	int i;
	int meta_size;
	int ret;
	struct r5l_io_unit *io;

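	/*
	 * Each data page needs a payload header plus one checksum; the
	 * parity pages share a single payload header, with one checksum
	 * per parity page.
	 */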
	meta_size =
		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
		 * data_pages) +
		sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * parity_pages;

	ret = r5l_get_meta(log, meta_size);
	if (ret)
		return ret;

	io = log->current_io;

	if (test_and_clear_bit(STRIPE_R5C_PREFLUSH, &sh->state))
		io->has_flush = 1;

	for (i = 0; i < sh->disks; i++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
		    test_bit(R5_InJournal, &sh->dev[i].flags))
			continue;
		if (i == sh->pd_idx || i == sh->qd_idx)
			continue;
		if (test_bit(R5_WantFUA, &sh->dev[i].flags) &&
		    log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) {
			io->has_fua = 1;
			/*
			 * we need to flush journal to make sure recovery can
			 * reach the data with fua flag
			 */
			io->has_flush = 1;
		}
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
					raid5_compute_blocknr(sh, i, 0),
					sh->dev[i].log_checksum, 0, false);
		r5l_append_payload_page(log, sh->dev[i].page);
	}

	if (parity_pages == 2) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					sh->dev[sh->qd_idx].log_checksum, true);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
		r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
	} else if (parity_pages == 1) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					0, false);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
	} else /* Just writing data, not parity, in caching phase */
		BUG_ON(parity_pages != 0);

	list_add_tail(&sh->log_list, &io->stripe_list);
	atomic_inc(&io->pending_stripe);
	sh->log_io = io;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return 0;

	if (sh->log_start == MaxSector) {
		BUG_ON(!list_empty(&sh->r5c));
		sh->log_start = io->log_start;
		spin_lock_irq(&log->stripe_in_journal_lock);
		list_add_tail(&sh->r5c,
			      &log->stripe_in_journal_list);
		spin_unlock_irq(&log->stripe_in_journal_lock);
		atomic_inc(&log->stripe_in_journal_count);
	}
	return 0;
}

/* add stripe to no_space_stripes, and then wake up reclaim */
static inline void r5l_add_no_space_stripe(struct r5l_log *log,
					   struct stripe_head *sh)
{
	spin_lock(&log->no_space_stripes_lock);
	list_add_tail(&sh->log_list, &log->no_space_stripes);
	spin_unlock(&log->no_space_stripes_lock);
}

/*
 * running in raid5d, where reclaim could wait for raid5d too (when it flushes
 * data from log to raid disks), so we shouldn't wait for reclaim here
 */
int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int write_disks = 0;
	int data_pages, parity_pages;
	int reserve;
	int i;
	int ret = 0;
	bool wake_reclaim = false;

	if (!log)
		return -EAGAIN;
	/* Don't support stripe batch */
	if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
	    test_bit(STRIPE_SYNCING, &sh->state)) {
		/* the stripe is written to log, we start writing it to raid */
		clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
		return -EAGAIN;
	}

	WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));

	for (i = 0; i < sh->disks; i++) {
		void *addr;

		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
		    test_bit(R5_InJournal, &sh->dev[i].flags))
			continue;

		write_disks++;
		/* checksum is already calculated in last run */
		if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
			continue;
		addr = kmap_atomic(sh->dev[i].page);
		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
						    addr, PAGE_SIZE);
		kunmap_atomic(addr);
	}
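	/* one parity page for RAID4/5 (P only, qd_idx < 0), two for RAID6 (P and Q) */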
	parity_pages = 1 + !!(sh->qd_idx >= 0);
	data_pages = write_disks - parity_pages;

	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	/*
	 * The stripe must enter state machine again to finish the write, so
	 * don't delay.
	 */
	clear_bit(STRIPE_DELAYED, &sh->state);
	atomic_inc(&sh->count);

	mutex_lock(&log->io_mutex);
	/* meta + data */
	reserve = (1 + write_disks) << (PAGE_SHIFT - 9);

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		if (!r5l_has_free_space(log, reserve)) {
			r5l_add_no_space_stripe(log, sh);
			wake_reclaim = true;
		} else {
			ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
			if (ret) {
				spin_lock_irq(&log->io_list_lock);
				list_add_tail(&sh->log_list,
					      &log->no_mem_stripes);
				spin_unlock_irq(&log->io_list_lock);
			}
		}
	} else {	/* R5C_JOURNAL_MODE_WRITE_BACK */
		/*
		 * log space critical, do not process stripes that are
		 * not in cache yet (sh->log_start == MaxSector).
		 */
		if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
		    sh->log_start == MaxSector) {
			r5l_add_no_space_stripe(log, sh);
			wake_reclaim = true;
			reserve = 0;
		} else if (!r5l_has_free_space(log, reserve)) {
			if (sh->log_start == log->last_checkpoint)
				BUG();
			else
				r5l_add_no_space_stripe(log, sh);
		} else {
			ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
			if (ret) {
				spin_lock_irq(&log->io_list_lock);
				list_add_tail(&sh->log_list,
					      &log->no_mem_stripes);
				spin_unlock_irq(&log->io_list_lock);
			}
		}
	}

	mutex_unlock(&log->io_mutex);
	if (wake_reclaim)
		r5l_wake_reclaim(log, reserve);
	return 0;
}
1087f6bed0efSShaohua Li
r5l_write_stripe_run(struct r5l_log * log)1088f6bed0efSShaohua Li void r5l_write_stripe_run(struct r5l_log *log)
1089f6bed0efSShaohua Li {
1090f6bed0efSShaohua Li if (!log)
1091f6bed0efSShaohua Li return;
1092f6bed0efSShaohua Li mutex_lock(&log->io_mutex);
1093f6bed0efSShaohua Li r5l_submit_current_io(log);
1094f6bed0efSShaohua Li mutex_unlock(&log->io_mutex);
1095f6bed0efSShaohua Li }
1096f6bed0efSShaohua Li
1097828cbe98SShaohua Li int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
1098828cbe98SShaohua Li {
10993bddb7f8SSong Liu if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
1100828cbe98SShaohua Li /*
11013bddb7f8SSong Liu 		 * in write through (journal only) mode,
11023bddb7f8SSong Liu 		 * we flush the log disk cache first, then write stripe data to
11033bddb7f8SSong Liu 		 * the raid disks. So if the bio is finished, the log disk cache
11043bddb7f8SSong Liu 		 * is flushed already. Recovery guarantees we can recover
11053bddb7f8SSong Liu 		 * the bio from the log disk, so we don't need to flush again
1106828cbe98SShaohua Li */
1107828cbe98SShaohua Li if (bio->bi_iter.bi_size == 0) {
1108828cbe98SShaohua Li bio_endio(bio);
1109828cbe98SShaohua Li return 0;
1110828cbe98SShaohua Li }
11111eff9d32SJens Axboe bio->bi_opf &= ~REQ_PREFLUSH;
11123bddb7f8SSong Liu } else {
11133bddb7f8SSong Liu /* write back (with cache) */
11143bddb7f8SSong Liu if (bio->bi_iter.bi_size == 0) {
11153bddb7f8SSong Liu mutex_lock(&log->io_mutex);
11163bddb7f8SSong Liu r5l_get_meta(log, 0);
11173bddb7f8SSong Liu bio_list_add(&log->current_io->flush_barriers, bio);
11183bddb7f8SSong Liu log->current_io->has_flush = 1;
11193bddb7f8SSong Liu log->current_io->has_null_flush = 1;
11203bddb7f8SSong Liu atomic_inc(&log->current_io->pending_stripe);
11213bddb7f8SSong Liu r5l_submit_current_io(log);
11223bddb7f8SSong Liu mutex_unlock(&log->io_mutex);
11233bddb7f8SSong Liu return 0;
11243bddb7f8SSong Liu }
11253bddb7f8SSong Liu }
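	/*
	 * -EAGAIN: the bio still carries data, so the caller is expected to
	 * keep handling it through the normal write path (in write through
	 * mode, REQ_PREFLUSH has already been cleared above)
	 */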
1126828cbe98SShaohua Li return -EAGAIN;
1127828cbe98SShaohua Li }
1128828cbe98SShaohua Li
1129f6bed0efSShaohua Li /* This will run after log space is reclaimed */
1130f6bed0efSShaohua Li static void r5l_run_no_space_stripes(struct r5l_log *log)
1131f6bed0efSShaohua Li {
1132f6bed0efSShaohua Li struct stripe_head *sh;
1133f6bed0efSShaohua Li
1134f6bed0efSShaohua Li spin_lock(&log->no_space_stripes_lock);
1135f6bed0efSShaohua Li while (!list_empty(&log->no_space_stripes)) {
1136f6bed0efSShaohua Li sh = list_first_entry(&log->no_space_stripes,
1137f6bed0efSShaohua Li struct stripe_head, log_list);
1138f6bed0efSShaohua Li list_del_init(&sh->log_list);
1139f6bed0efSShaohua Li set_bit(STRIPE_HANDLE, &sh->state);
1140f6bed0efSShaohua Li raid5_release_stripe(sh);
1141f6bed0efSShaohua Li }
1142f6bed0efSShaohua Li spin_unlock(&log->no_space_stripes_lock);
1143f6bed0efSShaohua Li }
1144f6bed0efSShaohua Li
1145a39f7afdSSong Liu /*
1146a39f7afdSSong Liu * calculate new last_checkpoint
1147a39f7afdSSong Liu * for write through mode, returns log->next_checkpoint
1148a39f7afdSSong Liu * for write back, returns log_start of first sh in stripe_in_journal_list
1149a39f7afdSSong Liu */
1150a39f7afdSSong Liu static sector_t r5c_calculate_new_cp(struct r5conf *conf)
1151a39f7afdSSong Liu {
1152a39f7afdSSong Liu struct stripe_head *sh;
1153*ebf6f517SYu Kuai struct r5l_log *log = READ_ONCE(conf->log);
1154a39f7afdSSong Liu sector_t new_cp;
1155a39f7afdSSong Liu unsigned long flags;
1156a39f7afdSSong Liu
1157a39f7afdSSong Liu if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
1158a39f7afdSSong Liu return log->next_checkpoint;
1159a39f7afdSSong Liu
1160a39f7afdSSong Liu spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
1161*ebf6f517SYu Kuai if (list_empty(&log->stripe_in_journal_list)) {
1162a39f7afdSSong Liu /* all stripes flushed */
1163d3014e21SDan Carpenter spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1164a39f7afdSSong Liu return log->next_checkpoint;
1165a39f7afdSSong Liu }
1166*ebf6f517SYu Kuai sh = list_first_entry(&log->stripe_in_journal_list,
1167a39f7afdSSong Liu struct stripe_head, r5c);
1168a39f7afdSSong Liu new_cp = sh->log_start;
1169a39f7afdSSong Liu spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1170a39f7afdSSong Liu return new_cp;
1171a39f7afdSSong Liu }
1172a39f7afdSSong Liu
117317036461SChristoph Hellwig static sector_t r5l_reclaimable_space(struct r5l_log *log)
117417036461SChristoph Hellwig {
1175a39f7afdSSong Liu struct r5conf *conf = log->rdev->mddev->private;
1176a39f7afdSSong Liu
117717036461SChristoph Hellwig return r5l_ring_distance(log, log->last_checkpoint,
1178a39f7afdSSong Liu r5c_calculate_new_cp(conf));
117917036461SChristoph Hellwig }
118017036461SChristoph Hellwig
11815036c390SChristoph Hellwig static void r5l_run_no_mem_stripe(struct r5l_log *log)
11825036c390SChristoph Hellwig {
11835036c390SChristoph Hellwig struct stripe_head *sh;
11845036c390SChristoph Hellwig
1185efa4b77bSShaohua Li lockdep_assert_held(&log->io_list_lock);
11865036c390SChristoph Hellwig
11875036c390SChristoph Hellwig if (!list_empty(&log->no_mem_stripes)) {
11885036c390SChristoph Hellwig sh = list_first_entry(&log->no_mem_stripes,
11895036c390SChristoph Hellwig struct stripe_head, log_list);
11905036c390SChristoph Hellwig list_del_init(&sh->log_list);
11915036c390SChristoph Hellwig set_bit(STRIPE_HANDLE, &sh->state);
11925036c390SChristoph Hellwig raid5_release_stripe(sh);
11935036c390SChristoph Hellwig }
11945036c390SChristoph Hellwig }
11955036c390SChristoph Hellwig
119604732f74SChristoph Hellwig static bool r5l_complete_finished_ios(struct r5l_log *log)
119717036461SChristoph Hellwig {
119817036461SChristoph Hellwig struct r5l_io_unit *io, *next;
119917036461SChristoph Hellwig bool found = false;
120017036461SChristoph Hellwig
1201efa4b77bSShaohua Li lockdep_assert_held(&log->io_list_lock);
120217036461SChristoph Hellwig
120304732f74SChristoph Hellwig list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
120417036461SChristoph Hellwig /* don't change list order */
120517036461SChristoph Hellwig if (io->state < IO_UNIT_STRIPE_END)
120617036461SChristoph Hellwig break;
120717036461SChristoph Hellwig
120817036461SChristoph Hellwig log->next_checkpoint = io->log_start;
120917036461SChristoph Hellwig
121017036461SChristoph Hellwig list_del(&io->log_sibling);
1211afeee514SKent Overstreet mempool_free(io, &log->io_pool);
12125036c390SChristoph Hellwig r5l_run_no_mem_stripe(log);
121317036461SChristoph Hellwig
121417036461SChristoph Hellwig found = true;
121517036461SChristoph Hellwig }
121617036461SChristoph Hellwig
121717036461SChristoph Hellwig return found;
121817036461SChristoph Hellwig }
121917036461SChristoph Hellwig
1220509ffec7SChristoph Hellwig static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
1221509ffec7SChristoph Hellwig {
1222509ffec7SChristoph Hellwig struct r5l_log *log = io->log;
1223a39f7afdSSong Liu struct r5conf *conf = log->rdev->mddev->private;
1224509ffec7SChristoph Hellwig unsigned long flags;
1225509ffec7SChristoph Hellwig
1226509ffec7SChristoph Hellwig spin_lock_irqsave(&log->io_list_lock, flags);
1227509ffec7SChristoph Hellwig __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);
122817036461SChristoph Hellwig
122904732f74SChristoph Hellwig if (!r5l_complete_finished_ios(log)) {
123085f2f9a4SShaohua Li spin_unlock_irqrestore(&log->io_list_lock, flags);
123185f2f9a4SShaohua Li return;
123285f2f9a4SShaohua Li }
1233509ffec7SChristoph Hellwig
1234a39f7afdSSong Liu if (r5l_reclaimable_space(log) > log->max_free_space ||
1235a39f7afdSSong Liu test_bit(R5C_LOG_TIGHT, &conf->cache_state))
1236509ffec7SChristoph Hellwig r5l_wake_reclaim(log, 0);
1237509ffec7SChristoph Hellwig
1238509ffec7SChristoph Hellwig spin_unlock_irqrestore(&log->io_list_lock, flags);
1239509ffec7SChristoph Hellwig wake_up(&log->iounit_wait);
1240509ffec7SChristoph Hellwig }
1241509ffec7SChristoph Hellwig
12420576b1c6SShaohua Li void r5l_stripe_write_finished(struct stripe_head *sh)
12430576b1c6SShaohua Li {
12440576b1c6SShaohua Li struct r5l_io_unit *io;
12450576b1c6SShaohua Li
12460576b1c6SShaohua Li io = sh->log_io;
12470576b1c6SShaohua Li sh->log_io = NULL;
12480576b1c6SShaohua Li
1249509ffec7SChristoph Hellwig if (io && atomic_dec_and_test(&io->pending_stripe))
1250509ffec7SChristoph Hellwig __r5l_stripe_write_finished(io);
12510576b1c6SShaohua Li }
12520576b1c6SShaohua Li
1253a8c34f91SShaohua Li static void r5l_log_flush_endio(struct bio *bio)
1254a8c34f91SShaohua Li {
1255a8c34f91SShaohua Li struct r5l_log *log = container_of(bio, struct r5l_log,
1256a8c34f91SShaohua Li flush_bio);
1257a8c34f91SShaohua Li unsigned long flags;
1258a8c34f91SShaohua Li struct r5l_io_unit *io;
1259a8c34f91SShaohua Li
12604e4cbee9SChristoph Hellwig if (bio->bi_status)
12616e74a9cfSShaohua Li md_error(log->rdev->mddev, log->rdev);
12620d0bd28cSYu Kuai bio_uninit(bio);
12636e74a9cfSShaohua Li
1264a8c34f91SShaohua Li spin_lock_irqsave(&log->io_list_lock, flags);
1265d8858f43SChristoph Hellwig list_for_each_entry(io, &log->flushing_ios, log_sibling)
1266d8858f43SChristoph Hellwig r5l_io_run_stripes(io);
126704732f74SChristoph Hellwig list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
1268a8c34f91SShaohua Li spin_unlock_irqrestore(&log->io_list_lock, flags);
1269a8c34f91SShaohua Li }
1270a8c34f91SShaohua Li
12710576b1c6SShaohua Li /*
12720576b1c6SShaohua Li  * Start dispatching IO to the raid disks.
12730576b1c6SShaohua Li  * The log consists of io_units, each with a meta block. One situation we
12740576b1c6SShaohua Li  * want to avoid: a broken meta block in the middle of the log prevents
12750576b1c6SShaohua Li  * recovery from finding any meta block after it. So if an operation requires
12760576b1c6SShaohua Li  * a meta block to be persistent in the log, every meta block before it must
12770576b1c6SShaohua Li  * be persistent in the log too. A case is:
12780576b1c6SShaohua Li  *
12790576b1c6SShaohua Li  * stripe data/parity is in the log, and we start writing the stripe to the
12800576b1c6SShaohua Li  * raid disks. The stripe data/parity must be persistent in the log before we
12810576b1c6SShaohua Li  * do the write to the raid disks.
12820576b1c6SShaohua Li  *
12830576b1c6SShaohua Li  * The solution is to strictly maintain the io_unit list order: we only write the stripes of an io_unit to the raid disks after it becomes the first io_unit whose data/parity is in the log.
12840576b1c6SShaohua Li */
12850576b1c6SShaohua Li void r5l_flush_stripe_to_raid(struct r5l_log *log)
12860576b1c6SShaohua Li {
1287a8c34f91SShaohua Li bool do_flush;
128856fef7c6SChristoph Hellwig
128956fef7c6SChristoph Hellwig if (!log || !log->need_cache_flush)
12900576b1c6SShaohua Li return;
12910576b1c6SShaohua Li
1292a8c34f91SShaohua Li spin_lock_irq(&log->io_list_lock);
1293a8c34f91SShaohua Li /* flush bio is running */
1294a8c34f91SShaohua Li if (!list_empty(&log->flushing_ios)) {
1295a8c34f91SShaohua Li spin_unlock_irq(&log->io_list_lock);
12960576b1c6SShaohua Li return;
12970576b1c6SShaohua Li }
1298a8c34f91SShaohua Li list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
1299a8c34f91SShaohua Li do_flush = !list_empty(&log->flushing_ios);
13000576b1c6SShaohua Li spin_unlock_irq(&log->io_list_lock);
1301a8c34f91SShaohua Li
1302a8c34f91SShaohua Li if (!do_flush)
1303a8c34f91SShaohua Li return;
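	/*
	 * issue an empty PREFLUSH to the log device; its completion handler
	 * (r5l_log_flush_endio) runs the stripes of the flushing io_units and
	 * moves them to finished_ios
	 */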
13040dd00cbaSChristoph Hellwig bio_init(&log->flush_bio, log->rdev->bdev, NULL, 0,
1305a7c50c94SChristoph Hellwig REQ_OP_WRITE | REQ_PREFLUSH);
1306a8c34f91SShaohua Li log->flush_bio.bi_end_io = r5l_log_flush_endio;
13074e49ea4aSMike Christie submit_bio(&log->flush_bio);
13080576b1c6SShaohua Li }
13090576b1c6SShaohua Li
13100576b1c6SShaohua Li static void r5l_write_super(struct r5l_log *log, sector_t cp);
13114b482044SShaohua Li static void r5l_write_super_and_discard_space(struct r5l_log *log,
13124b482044SShaohua Li sector_t end)
13134b482044SShaohua Li {
13144b482044SShaohua Li struct block_device *bdev = log->rdev->bdev;
13154b482044SShaohua Li struct mddev *mddev;
13164b482044SShaohua Li
13174b482044SShaohua Li r5l_write_super(log, end);
13184b482044SShaohua Li
131970200574SChristoph Hellwig if (!bdev_max_discard_sectors(bdev))
13204b482044SShaohua Li return;
13214b482044SShaohua Li
13224b482044SShaohua Li mddev = log->rdev->mddev;
13234b482044SShaohua Li /*
13248e018c21SShaohua Li * Discard could zero data, so before discard we must make sure
13258e018c21SShaohua Li * superblock is updated to new log tail. Updating superblock (either
13268e018c21SShaohua Li * directly call md_update_sb() or depend on md thread) must hold
13278e018c21SShaohua Li * reconfig mutex. On the other hand, raid5_quiesce is called with
132812ba6676SXU pengfei 	 * reconfig_mutex held. The first step of raid5_quiesce() is waiting
132912ba6676SXU pengfei * for all IO finish, hence waiting for reclaim thread, while reclaim
133012ba6676SXU pengfei * thread is calling this function and waiting for reconfig mutex. So
13318e018c21SShaohua Li 	 * there is a deadlock. We work around this issue with a trylock.
13328e018c21SShaohua Li * FIXME: we could miss discard if we can't take reconfig mutex
13334b482044SShaohua Li */
13342953079cSShaohua Li set_mask_bits(&mddev->sb_flags, 0,
13352953079cSShaohua Li BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
13368e018c21SShaohua Li if (!mddev_trylock(mddev))
13378e018c21SShaohua Li return;
13384b482044SShaohua Li md_update_sb(mddev, 1);
13398e018c21SShaohua Li mddev_unlock(mddev);
13404b482044SShaohua Li
13416e74a9cfSShaohua Li /* discard IO error really doesn't matter, ignore it */
13424b482044SShaohua Li if (log->last_checkpoint < end) {
13434b482044SShaohua Li blkdev_issue_discard(bdev,
13444b482044SShaohua Li log->last_checkpoint + log->rdev->data_offset,
134544abff2cSChristoph Hellwig end - log->last_checkpoint, GFP_NOIO);
13464b482044SShaohua Li } else {
13474b482044SShaohua Li blkdev_issue_discard(bdev,
13484b482044SShaohua Li log->last_checkpoint + log->rdev->data_offset,
13494b482044SShaohua Li log->device_size - log->last_checkpoint,
135044abff2cSChristoph Hellwig GFP_NOIO);
13514b482044SShaohua Li blkdev_issue_discard(bdev, log->rdev->data_offset, end,
135244abff2cSChristoph Hellwig GFP_NOIO);
13534b482044SShaohua Li }
13544b482044SShaohua Li }
13554b482044SShaohua Li
1356a39f7afdSSong Liu /*
1357a39f7afdSSong Liu * r5c_flush_stripe moves stripe from cached list to handle_list. When called,
1358a39f7afdSSong Liu * the stripe must be on r5c_cached_full_stripes or r5c_cached_partial_stripes.
1359a39f7afdSSong Liu *
1360a39f7afdSSong Liu * must hold conf->device_lock
1361a39f7afdSSong Liu */
1362a39f7afdSSong Liu static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh)
1363a39f7afdSSong Liu {
1364a39f7afdSSong Liu BUG_ON(list_empty(&sh->lru));
1365a39f7afdSSong Liu BUG_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
1366a39f7afdSSong Liu BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
1367a39f7afdSSong Liu
1368a39f7afdSSong Liu /*
1369a39f7afdSSong Liu * The stripe is not ON_RELEASE_LIST, so it is safe to call
1370a39f7afdSSong Liu * raid5_release_stripe() while holding conf->device_lock
1371a39f7afdSSong Liu */
1372a39f7afdSSong Liu BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
1373efa4b77bSShaohua Li lockdep_assert_held(&conf->device_lock);
1374a39f7afdSSong Liu
1375a39f7afdSSong Liu list_del_init(&sh->lru);
1376a39f7afdSSong Liu atomic_inc(&sh->count);
1377a39f7afdSSong Liu
1378a39f7afdSSong Liu set_bit(STRIPE_HANDLE, &sh->state);
1379a39f7afdSSong Liu atomic_inc(&conf->active_stripes);
1380a39f7afdSSong Liu r5c_make_stripe_write_out(sh);
1381a39f7afdSSong Liu
1382e33fbb9cSShaohua Li if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state))
1383e33fbb9cSShaohua Li atomic_inc(&conf->r5c_flushing_partial_stripes);
1384e33fbb9cSShaohua Li else
1385e33fbb9cSShaohua Li atomic_inc(&conf->r5c_flushing_full_stripes);
1386a39f7afdSSong Liu raid5_release_stripe(sh);
1387a39f7afdSSong Liu }
1388a39f7afdSSong Liu
1389a39f7afdSSong Liu /*
1390a39f7afdSSong Liu * if num == 0, flush all full stripes
1391a39f7afdSSong Liu  * if num > 0, flush all full stripes. If fewer than num full stripes are
1392a39f7afdSSong Liu  * flushed, flush some partial stripes until num stripes in total are
1393a39f7afdSSong Liu  * flushed or there are no more cached stripes.
1394a39f7afdSSong Liu */
1395a39f7afdSSong Liu void r5c_flush_cache(struct r5conf *conf, int num)
1396a39f7afdSSong Liu {
1397a39f7afdSSong Liu int count;
1398a39f7afdSSong Liu struct stripe_head *sh, *next;
1399a39f7afdSSong Liu
1400efa4b77bSShaohua Li lockdep_assert_held(&conf->device_lock);
1401*ebf6f517SYu Kuai if (!READ_ONCE(conf->log))
1402a39f7afdSSong Liu return;
1403a39f7afdSSong Liu
1404a39f7afdSSong Liu count = 0;
1405a39f7afdSSong Liu list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) {
1406a39f7afdSSong Liu r5c_flush_stripe(conf, sh);
1407a39f7afdSSong Liu count++;
1408a39f7afdSSong Liu }
1409a39f7afdSSong Liu
1410a39f7afdSSong Liu if (count >= num)
1411a39f7afdSSong Liu return;
1412a39f7afdSSong Liu list_for_each_entry_safe(sh, next,
1413a39f7afdSSong Liu &conf->r5c_partial_stripe_list, lru) {
1414a39f7afdSSong Liu r5c_flush_stripe(conf, sh);
1415a39f7afdSSong Liu if (++count >= num)
1416a39f7afdSSong Liu break;
1417a39f7afdSSong Liu }
1418a39f7afdSSong Liu }
1419a39f7afdSSong Liu
1420a39f7afdSSong Liu static void r5c_do_reclaim(struct r5conf *conf)
1421a39f7afdSSong Liu {
1422*ebf6f517SYu Kuai struct r5l_log *log = READ_ONCE(conf->log);
1423a39f7afdSSong Liu struct stripe_head *sh;
1424a39f7afdSSong Liu int count = 0;
1425a39f7afdSSong Liu unsigned long flags;
1426a39f7afdSSong Liu int total_cached;
1427a39f7afdSSong Liu int stripes_to_flush;
1428e33fbb9cSShaohua Li int flushing_partial, flushing_full;
1429a39f7afdSSong Liu
1430a39f7afdSSong Liu if (!r5c_is_writeback(log))
1431a39f7afdSSong Liu return;
1432a39f7afdSSong Liu
1433e33fbb9cSShaohua Li flushing_partial = atomic_read(&conf->r5c_flushing_partial_stripes);
1434e33fbb9cSShaohua Li flushing_full = atomic_read(&conf->r5c_flushing_full_stripes);
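	/* stripes already being flushed do not count as cached */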
1435a39f7afdSSong Liu total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
1436e33fbb9cSShaohua Li atomic_read(&conf->r5c_cached_full_stripes) -
1437e33fbb9cSShaohua Li flushing_full - flushing_partial;
1438a39f7afdSSong Liu
1439a39f7afdSSong Liu if (total_cached > conf->min_nr_stripes * 3 / 4 ||
1440a39f7afdSSong Liu atomic_read(&conf->empty_inactive_list_nr) > 0)
1441a39f7afdSSong Liu /*
1442a39f7afdSSong Liu 		 * if stripe cache pressure is high, flush all full stripes and
1443a39f7afdSSong Liu * some partial stripes
1444a39f7afdSSong Liu */
1445a39f7afdSSong Liu stripes_to_flush = R5C_RECLAIM_STRIPE_GROUP;
1446a39f7afdSSong Liu else if (total_cached > conf->min_nr_stripes * 1 / 2 ||
1447e33fbb9cSShaohua Li atomic_read(&conf->r5c_cached_full_stripes) - flushing_full >
144884890c03SShaohua Li R5C_FULL_STRIPE_FLUSH_BATCH(conf))
1449a39f7afdSSong Liu /*
1450a39f7afdSSong Liu 		 * if stripe cache pressure is moderate, or if there are many
1451a39f7afdSSong Liu 		 * full stripes, flush all full stripes
1452a39f7afdSSong Liu */
1453a39f7afdSSong Liu stripes_to_flush = 0;
1454a39f7afdSSong Liu else
1455a39f7afdSSong Liu /* no need to flush */
1456a39f7afdSSong Liu stripes_to_flush = -1;
1457a39f7afdSSong Liu
1458a39f7afdSSong Liu if (stripes_to_flush >= 0) {
1459a39f7afdSSong Liu spin_lock_irqsave(&conf->device_lock, flags);
1460a39f7afdSSong Liu r5c_flush_cache(conf, stripes_to_flush);
1461a39f7afdSSong Liu spin_unlock_irqrestore(&conf->device_lock, flags);
1462a39f7afdSSong Liu }
1463a39f7afdSSong Liu
1464a39f7afdSSong Liu /* if log space is tight, flush stripes on stripe_in_journal_list */
1465a39f7afdSSong Liu if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) {
1466a39f7afdSSong Liu spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
1467a39f7afdSSong Liu spin_lock(&conf->device_lock);
1468a39f7afdSSong Liu list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) {
1469a39f7afdSSong Liu /*
1470a39f7afdSSong Liu * stripes on stripe_in_journal_list could be in any
1471a39f7afdSSong Liu * state of the stripe_cache state machine. In this
1472a39f7afdSSong Liu 			 * case, we only want to flush stripes on
1473a39f7afdSSong Liu * r5c_cached_full/partial_stripes. The following
1474a39f7afdSSong Liu * condition makes sure the stripe is on one of the
1475a39f7afdSSong Liu * two lists.
1476a39f7afdSSong Liu */
1477a39f7afdSSong Liu if (!list_empty(&sh->lru) &&
1478a39f7afdSSong Liu !test_bit(STRIPE_HANDLE, &sh->state) &&
1479a39f7afdSSong Liu atomic_read(&sh->count) == 0) {
1480a39f7afdSSong Liu r5c_flush_stripe(conf, sh);
1481a39f7afdSSong Liu if (count++ >= R5C_RECLAIM_STRIPE_GROUP)
1482a39f7afdSSong Liu break;
1483a39f7afdSSong Liu }
1484e8fd52eeSShaohua Li }
1485a39f7afdSSong Liu spin_unlock(&conf->device_lock);
1486a39f7afdSSong Liu spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1487a39f7afdSSong Liu }
1488f687a33eSSong Liu
1489f687a33eSSong Liu if (!test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
1490f687a33eSSong Liu r5l_run_no_space_stripes(log);
1491f687a33eSSong Liu
1492a39f7afdSSong Liu md_wakeup_thread(conf->mddev->thread);
1493a39f7afdSSong Liu }
14944b482044SShaohua Li
14950576b1c6SShaohua Li static void r5l_do_reclaim(struct r5l_log *log)
14960576b1c6SShaohua Li {
1497a39f7afdSSong Liu struct r5conf *conf = log->rdev->mddev->private;
14980576b1c6SShaohua Li sector_t reclaim_target = xchg(&log->reclaim_target, 0);
149917036461SChristoph Hellwig sector_t reclaimable;
150017036461SChristoph Hellwig sector_t next_checkpoint;
1501a39f7afdSSong Liu bool write_super;
15020576b1c6SShaohua Li
15030576b1c6SShaohua Li spin_lock_irq(&log->io_list_lock);
1504a39f7afdSSong Liu write_super = r5l_reclaimable_space(log) > log->max_free_space ||
1505a39f7afdSSong Liu reclaim_target != 0 || !list_empty(&log->no_space_stripes);
15060576b1c6SShaohua Li /*
15070576b1c6SShaohua Li 	 * move the proper io_units to the reclaim list. We should not change
15080576b1c6SShaohua Li 	 * the order: reclaimable and unreclaimable io_units can be mixed in the
15090576b1c6SShaohua Li 	 * list, and we shouldn't reuse the space of an unreclaimable io_unit
15100576b1c6SShaohua Li */
15110576b1c6SShaohua Li while (1) {
151217036461SChristoph Hellwig reclaimable = r5l_reclaimable_space(log);
151317036461SChristoph Hellwig if (reclaimable >= reclaim_target ||
15140576b1c6SShaohua Li (list_empty(&log->running_ios) &&
15150576b1c6SShaohua Li list_empty(&log->io_end_ios) &&
1516a8c34f91SShaohua Li list_empty(&log->flushing_ios) &&
151704732f74SChristoph Hellwig list_empty(&log->finished_ios)))
15180576b1c6SShaohua Li break;
15190576b1c6SShaohua Li
152017036461SChristoph Hellwig md_wakeup_thread(log->rdev->mddev->thread);
152117036461SChristoph Hellwig wait_event_lock_irq(log->iounit_wait,
152217036461SChristoph Hellwig r5l_reclaimable_space(log) > reclaimable,
152317036461SChristoph Hellwig log->io_list_lock);
15240576b1c6SShaohua Li }
152517036461SChristoph Hellwig
1526a39f7afdSSong Liu next_checkpoint = r5c_calculate_new_cp(conf);
15270576b1c6SShaohua Li spin_unlock_irq(&log->io_list_lock);
15280576b1c6SShaohua Li
1529a39f7afdSSong Liu if (reclaimable == 0 || !write_super)
15300576b1c6SShaohua Li return;
15310576b1c6SShaohua Li
15320576b1c6SShaohua Li /*
15330576b1c6SShaohua Li * write_super will flush cache of each raid disk. We must write super
15340576b1c6SShaohua Li * here, because the log area might be reused soon and we don't want to
15350576b1c6SShaohua Li * confuse recovery
15360576b1c6SShaohua Li */
15374b482044SShaohua Li r5l_write_super_and_discard_space(log, next_checkpoint);
15380576b1c6SShaohua Li
15390576b1c6SShaohua Li mutex_lock(&log->io_mutex);
154017036461SChristoph Hellwig log->last_checkpoint = next_checkpoint;
1541a39f7afdSSong Liu r5c_update_log_state(log);
15420576b1c6SShaohua Li mutex_unlock(&log->io_mutex);
15430576b1c6SShaohua Li
154417036461SChristoph Hellwig r5l_run_no_space_stripes(log);
15450576b1c6SShaohua Li }
15460576b1c6SShaohua Li
15470576b1c6SShaohua Li static void r5l_reclaim_thread(struct md_thread *thread)
15480576b1c6SShaohua Li {
15490576b1c6SShaohua Li struct mddev *mddev = thread->mddev;
15500576b1c6SShaohua Li struct r5conf *conf = mddev->private;
1551*ebf6f517SYu Kuai struct r5l_log *log = READ_ONCE(conf->log);
15520576b1c6SShaohua Li
15530576b1c6SShaohua Li if (!log)
15540576b1c6SShaohua Li return;
1555a39f7afdSSong Liu r5c_do_reclaim(conf);
15560576b1c6SShaohua Li r5l_do_reclaim(log);
15570576b1c6SShaohua Li }
15580576b1c6SShaohua Li
1559a39f7afdSSong Liu void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
1560f6bed0efSShaohua Li {
15610576b1c6SShaohua Li unsigned long target;
15620576b1c6SShaohua Li unsigned long new = (unsigned long)space; /* overflow in theory */
15630576b1c6SShaohua Li
1564a39f7afdSSong Liu if (!log)
1565a39f7afdSSong Liu return;
15669487a0f6SUros Bizjak
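	/*
	 * raise reclaim_target monotonically: if a concurrent caller already
	 * requested at least this much space, keep the larger value
	 */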
15679487a0f6SUros Bizjak target = READ_ONCE(log->reclaim_target);
15680576b1c6SShaohua Li do {
15690576b1c6SShaohua Li if (new < target)
15700576b1c6SShaohua Li return;
15719487a0f6SUros Bizjak } while (!try_cmpxchg(&log->reclaim_target, &target, new));
15720576b1c6SShaohua Li md_wakeup_thread(log->reclaim_thread);
1573f6bed0efSShaohua Li }
1574f6bed0efSShaohua Li
1575b03e0ccbSNeilBrown void r5l_quiesce(struct r5l_log *log, int quiesce)
1576e6c033f7SShaohua Li {
157744693154SYu Kuai struct mddev *mddev = log->rdev->mddev;
157844693154SYu Kuai struct md_thread *thread = rcu_dereference_protected(
157944693154SYu Kuai log->reclaim_thread, lockdep_is_held(&mddev->reconfig_mutex));
1580b03e0ccbSNeilBrown
1581b03e0ccbSNeilBrown if (quiesce) {
15824b482044SShaohua Li /* make sure r5l_write_super_and_discard_space exits */
15834b482044SShaohua Li wake_up(&mddev->sb_wait);
158444693154SYu Kuai kthread_park(thread->tsk);
1585a39f7afdSSong Liu r5l_wake_reclaim(log, MaxSector);
1586e6c033f7SShaohua Li r5l_do_reclaim(log);
1587b03e0ccbSNeilBrown } else
158844693154SYu Kuai kthread_unpark(thread->tsk);
1589e6c033f7SShaohua Li }
1590e6c033f7SShaohua Li
15916e74a9cfSShaohua Li bool r5l_log_disk_error(struct r5conf *conf)
15926e74a9cfSShaohua Li {
1593*ebf6f517SYu Kuai struct r5l_log *log = READ_ONCE(conf->log);
1594f6b6ec5cSShaohua Li
15957769085cSLogan Gunthorpe /* don't allow write if journal disk is missing */
1596f6b6ec5cSShaohua Li if (!log)
15977769085cSLogan Gunthorpe return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
1598f6b6ec5cSShaohua Li else
15997769085cSLogan Gunthorpe return test_bit(Faulty, &log->rdev->flags);
16006e74a9cfSShaohua Li }
16016e74a9cfSShaohua Li
1602effe6ee7SSong Liu #define R5L_RECOVERY_PAGE_POOL_SIZE 256
1603effe6ee7SSong Liu
1604355810d1SShaohua Li struct r5l_recovery_ctx {
1605355810d1SShaohua Li struct page *meta_page; /* current meta */
1606355810d1SShaohua Li sector_t meta_total_blocks; /* total size of current meta and data */
1607355810d1SShaohua Li sector_t pos; /* recovery position */
1608355810d1SShaohua Li u64 seq; /* recovery position seq */
1609b4c625c6SSong Liu int data_parity_stripes; /* number of data_parity stripes */
1610b4c625c6SSong Liu int data_only_stripes; /* number of data_only stripes */
1611b4c625c6SSong Liu struct list_head cached_list;
1612effe6ee7SSong Liu
1613effe6ee7SSong Liu /*
1614effe6ee7SSong Liu * read ahead page pool (ra_pool)
1615effe6ee7SSong Liu 	 * in recovery, the log is read sequentially. It is not efficient to
1616effe6ee7SSong Liu 	 * read every page with sync_page_io(). The read ahead page pool
1617effe6ee7SSong Liu 	 * reads multiple pages with one IO, so further log reads can
1618effe6ee7SSong Liu * just copy data from the pool.
1619effe6ee7SSong Liu */
1620effe6ee7SSong Liu struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
162189f94b64SChristoph Hellwig struct bio_vec ra_bvec[R5L_RECOVERY_PAGE_POOL_SIZE];
1622effe6ee7SSong Liu sector_t pool_offset; /* offset of first page in the pool */
1623effe6ee7SSong Liu int total_pages; /* total allocated pages */
1624effe6ee7SSong Liu int valid_pages; /* pages with valid data */
1625355810d1SShaohua Li };
1626355810d1SShaohua Li
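/*
 * Allocate pages for the read ahead pool. A partial allocation is fine;
 * this only fails if not even one page could be allocated.
 */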
1627effe6ee7SSong Liu static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
1628effe6ee7SSong Liu struct r5l_recovery_ctx *ctx)
1629effe6ee7SSong Liu {
1630effe6ee7SSong Liu struct page *page;
1631effe6ee7SSong Liu
1632effe6ee7SSong Liu ctx->valid_pages = 0;
1633effe6ee7SSong Liu ctx->total_pages = 0;
1634effe6ee7SSong Liu while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
1635effe6ee7SSong Liu page = alloc_page(GFP_KERNEL);
1636effe6ee7SSong Liu
1637effe6ee7SSong Liu if (!page)
1638effe6ee7SSong Liu break;
1639effe6ee7SSong Liu ctx->ra_pool[ctx->total_pages] = page;
1640effe6ee7SSong Liu ctx->total_pages += 1;
1641effe6ee7SSong Liu }
1642effe6ee7SSong Liu
164389f94b64SChristoph Hellwig if (ctx->total_pages == 0)
1644effe6ee7SSong Liu return -ENOMEM;
1645effe6ee7SSong Liu
1646effe6ee7SSong Liu ctx->pool_offset = 0;
1647effe6ee7SSong Liu return 0;
1648effe6ee7SSong Liu }
1649effe6ee7SSong Liu
1650effe6ee7SSong Liu static void r5l_recovery_free_ra_pool(struct r5l_log *log,
1651effe6ee7SSong Liu struct r5l_recovery_ctx *ctx)
1652effe6ee7SSong Liu {
1653effe6ee7SSong Liu int i;
1654effe6ee7SSong Liu
1655effe6ee7SSong Liu for (i = 0; i < ctx->total_pages; ++i)
1656effe6ee7SSong Liu put_page(ctx->ra_pool[i]);
1657effe6ee7SSong Liu }
1658effe6ee7SSong Liu
1659effe6ee7SSong Liu /*
1660effe6ee7SSong Liu  * fetch up to ctx->total_pages pages starting at offset
1661effe6ee7SSong Liu * In normal cases, ctx->valid_pages == ctx->total_pages after the call.
1662effe6ee7SSong Liu * However, if the offset is close to the end of the journal device,
1663effe6ee7SSong Liu * ctx->valid_pages could be smaller than ctx->total_pages
1664effe6ee7SSong Liu */
1665effe6ee7SSong Liu static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
1666effe6ee7SSong Liu struct r5l_recovery_ctx *ctx,
1667effe6ee7SSong Liu sector_t offset)
1668effe6ee7SSong Liu {
166989f94b64SChristoph Hellwig struct bio bio;
167089f94b64SChristoph Hellwig int ret;
167189f94b64SChristoph Hellwig
167289f94b64SChristoph Hellwig bio_init(&bio, log->rdev->bdev, ctx->ra_bvec,
167389f94b64SChristoph Hellwig R5L_RECOVERY_PAGE_POOL_SIZE, REQ_OP_READ);
167489f94b64SChristoph Hellwig bio.bi_iter.bi_sector = log->rdev->data_offset + offset;
1675effe6ee7SSong Liu
1676effe6ee7SSong Liu ctx->valid_pages = 0;
1677effe6ee7SSong Liu ctx->pool_offset = offset;
1678effe6ee7SSong Liu
1679effe6ee7SSong Liu while (ctx->valid_pages < ctx->total_pages) {
168089f94b64SChristoph Hellwig __bio_add_page(&bio, ctx->ra_pool[ctx->valid_pages], PAGE_SIZE,
168189f94b64SChristoph Hellwig 0);
1682effe6ee7SSong Liu ctx->valid_pages += 1;
1683effe6ee7SSong Liu
1684effe6ee7SSong Liu offset = r5l_ring_add(log, offset, BLOCK_SECTORS);
1685effe6ee7SSong Liu
1686effe6ee7SSong Liu if (offset == 0) /* reached end of the device */
1687effe6ee7SSong Liu break;
1688effe6ee7SSong Liu }
1689effe6ee7SSong Liu
169089f94b64SChristoph Hellwig ret = submit_bio_wait(&bio);
169189f94b64SChristoph Hellwig bio_uninit(&bio);
169289f94b64SChristoph Hellwig return ret;
1693effe6ee7SSong Liu }
1694effe6ee7SSong Liu
1695effe6ee7SSong Liu /*
1696effe6ee7SSong Liu  * try to read a page from the read ahead page pool; if the page is not in
1697effe6ee7SSong Liu  * the pool, call r5l_recovery_fetch_ra_pool
1698effe6ee7SSong Liu */
1699effe6ee7SSong Liu static int r5l_recovery_read_page(struct r5l_log *log,
1700effe6ee7SSong Liu struct r5l_recovery_ctx *ctx,
1701effe6ee7SSong Liu struct page *page,
1702effe6ee7SSong Liu sector_t offset)
1703effe6ee7SSong Liu {
1704effe6ee7SSong Liu int ret;
1705effe6ee7SSong Liu
1706effe6ee7SSong Liu if (offset < ctx->pool_offset ||
1707effe6ee7SSong Liu offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS) {
1708effe6ee7SSong Liu ret = r5l_recovery_fetch_ra_pool(log, ctx, offset);
1709effe6ee7SSong Liu if (ret)
1710effe6ee7SSong Liu return ret;
1711effe6ee7SSong Liu }
1712effe6ee7SSong Liu
1713effe6ee7SSong Liu BUG_ON(offset < ctx->pool_offset ||
1714effe6ee7SSong Liu offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS);
1715effe6ee7SSong Liu
1716effe6ee7SSong Liu memcpy(page_address(page),
1717effe6ee7SSong Liu page_address(ctx->ra_pool[(offset - ctx->pool_offset) >>
1718effe6ee7SSong Liu BLOCK_SECTOR_SHIFT]),
1719effe6ee7SSong Liu PAGE_SIZE);
1720effe6ee7SSong Liu return 0;
1721effe6ee7SSong Liu }
1722effe6ee7SSong Liu
17239ed988f5SSong Liu static int r5l_recovery_read_meta_block(struct r5l_log *log,
1724355810d1SShaohua Li struct r5l_recovery_ctx *ctx)
1725355810d1SShaohua Li {
1726355810d1SShaohua Li struct page *page = ctx->meta_page;
1727355810d1SShaohua Li struct r5l_meta_block *mb;
1728355810d1SShaohua Li u32 crc, stored_crc;
1729effe6ee7SSong Liu int ret;
1730355810d1SShaohua Li
1731effe6ee7SSong Liu ret = r5l_recovery_read_page(log, ctx, page, ctx->pos);
1732effe6ee7SSong Liu if (ret != 0)
1733effe6ee7SSong Liu return ret;
1734355810d1SShaohua Li
1735355810d1SShaohua Li mb = page_address(page);
1736355810d1SShaohua Li stored_crc = le32_to_cpu(mb->checksum);
1737355810d1SShaohua Li mb->checksum = 0;
1738355810d1SShaohua Li
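	/* the meta block must match the expected magic, seq, version and position */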
1739355810d1SShaohua Li if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
1740355810d1SShaohua Li le64_to_cpu(mb->seq) != ctx->seq ||
1741355810d1SShaohua Li mb->version != R5LOG_VERSION ||
1742355810d1SShaohua Li le64_to_cpu(mb->position) != ctx->pos)
1743355810d1SShaohua Li return -EINVAL;
1744355810d1SShaohua Li
17455cb2fbd6SShaohua Li crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
1746355810d1SShaohua Li if (stored_crc != crc)
1747355810d1SShaohua Li return -EINVAL;
1748355810d1SShaohua Li
1749355810d1SShaohua Li if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
1750355810d1SShaohua Li return -EINVAL;
1751355810d1SShaohua Li
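	/* account for the meta block itself; payload blocks are added while parsing */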
1752355810d1SShaohua Li ctx->meta_total_blocks = BLOCK_SECTORS;
1753355810d1SShaohua Li
1754355810d1SShaohua Li return 0;
1755355810d1SShaohua Li }
1756355810d1SShaohua Li
17579ed988f5SSong Liu static void
17589ed988f5SSong Liu r5l_recovery_create_empty_meta_block(struct r5l_log *log,
17599ed988f5SSong Liu struct page *page,
17609ed988f5SSong Liu sector_t pos, u64 seq)
1761355810d1SShaohua Li {
1762355810d1SShaohua Li struct r5l_meta_block *mb;
1763355810d1SShaohua Li
1764355810d1SShaohua Li mb = page_address(page);
17659ed988f5SSong Liu clear_page(mb);
1766355810d1SShaohua Li mb->magic = cpu_to_le32(R5LOG_MAGIC);
1767355810d1SShaohua Li mb->version = R5LOG_VERSION;
1768355810d1SShaohua Li mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
1769355810d1SShaohua Li mb->seq = cpu_to_le64(seq);
1770355810d1SShaohua Li mb->position = cpu_to_le64(pos);
1771355810d1SShaohua Li }
1772355810d1SShaohua Li
1773355810d1SShaohua Li static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
1774355810d1SShaohua Li u64 seq)
1775355810d1SShaohua Li {
1776355810d1SShaohua Li struct page *page;
1777355810d1SShaohua Li struct r5l_meta_block *mb;
1778355810d1SShaohua Li
17799ed988f5SSong Liu page = alloc_page(GFP_KERNEL);
1780355810d1SShaohua Li if (!page)
1781355810d1SShaohua Li return -ENOMEM;
17829ed988f5SSong Liu r5l_recovery_create_empty_meta_block(log, page, pos, seq);
1783355810d1SShaohua Li mb = page_address(page);
17845c88f403SSong Liu mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
17855c88f403SSong Liu mb, PAGE_SIZE));
17864ce4c73fSBart Van Assche if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE |
17875a8948f8SJan Kara REQ_SYNC | REQ_FUA, false)) {
1788355810d1SShaohua Li __free_page(page);
1789355810d1SShaohua Li return -EIO;
1790355810d1SShaohua Li }
1791355810d1SShaohua Li __free_page(page);
1792355810d1SShaohua Li return 0;
1793355810d1SShaohua Li }
1794355810d1SShaohua Li
1795b4c625c6SSong Liu /*
1796b4c625c6SSong Liu  * r5l_recovery_load_data and r5l_recovery_load_parity use flag R5_Wantwrite
1797b4c625c6SSong Liu * to mark valid (potentially not flushed) data in the journal.
1798b4c625c6SSong Liu *
1799b4c625c6SSong Liu * We already verified checksum in r5l_recovery_verify_data_checksum_for_mb,
1800b4c625c6SSong Liu * so there should not be any mismatch here.
1801b4c625c6SSong Liu */
1802b4c625c6SSong Liu static void r5l_recovery_load_data(struct r5l_log *log,
1803b4c625c6SSong Liu struct stripe_head *sh,
1804b4c625c6SSong Liu struct r5l_recovery_ctx *ctx,
1805b4c625c6SSong Liu struct r5l_payload_data_parity *payload,
1806b4c625c6SSong Liu sector_t log_offset)
1807f6bed0efSShaohua Li {
1808b4c625c6SSong Liu struct mddev *mddev = log->rdev->mddev;
1809b4c625c6SSong Liu struct r5conf *conf = mddev->private;
1810b4c625c6SSong Liu int dd_idx;
1811355810d1SShaohua Li
1812b4c625c6SSong Liu raid5_compute_sector(conf,
1813b4c625c6SSong Liu le64_to_cpu(payload->location), 0,
1814b4c625c6SSong Liu &dd_idx, sh);
1815effe6ee7SSong Liu r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset);
1816b4c625c6SSong Liu sh->dev[dd_idx].log_checksum =
1817b4c625c6SSong Liu le32_to_cpu(payload->checksum[0]);
1818b4c625c6SSong Liu ctx->meta_total_blocks += BLOCK_SECTORS;
1819b4c625c6SSong Liu
1820b4c625c6SSong Liu set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags);
1821b4c625c6SSong Liu set_bit(STRIPE_R5C_CACHING, &sh->state);
1822b4c625c6SSong Liu }
1823b4c625c6SSong Liu
1824b4c625c6SSong Liu static void r5l_recovery_load_parity(struct r5l_log *log,
1825b4c625c6SSong Liu struct stripe_head *sh,
1826b4c625c6SSong Liu struct r5l_recovery_ctx *ctx,
1827b4c625c6SSong Liu struct r5l_payload_data_parity *payload,
1828b4c625c6SSong Liu sector_t log_offset)
1829b4c625c6SSong Liu {
1830b4c625c6SSong Liu struct mddev *mddev = log->rdev->mddev;
1831b4c625c6SSong Liu struct r5conf *conf = mddev->private;
1832b4c625c6SSong Liu
1833b4c625c6SSong Liu ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
1834effe6ee7SSong Liu r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset);
1835b4c625c6SSong Liu sh->dev[sh->pd_idx].log_checksum =
1836b4c625c6SSong Liu le32_to_cpu(payload->checksum[0]);
1837b4c625c6SSong Liu set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags);
1838b4c625c6SSong Liu
1839b4c625c6SSong Liu if (sh->qd_idx >= 0) {
1840effe6ee7SSong Liu r5l_recovery_read_page(
1841effe6ee7SSong Liu log, ctx, sh->dev[sh->qd_idx].page,
1842effe6ee7SSong Liu r5l_ring_add(log, log_offset, BLOCK_SECTORS));
1843b4c625c6SSong Liu sh->dev[sh->qd_idx].log_checksum =
1844b4c625c6SSong Liu le32_to_cpu(payload->checksum[1]);
1845b4c625c6SSong Liu set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags);
1846b4c625c6SSong Liu }
1847b4c625c6SSong Liu clear_bit(STRIPE_R5C_CACHING, &sh->state);
1848b4c625c6SSong Liu }
1849b4c625c6SSong Liu
1850b4c625c6SSong Liu static void r5l_recovery_reset_stripe(struct stripe_head *sh)
1851b4c625c6SSong Liu {
1852b4c625c6SSong Liu int i;
1853b4c625c6SSong Liu
1854b4c625c6SSong Liu sh->state = 0;
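	/* log_start == MaxSector means the stripe is not in the journal */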
1855b4c625c6SSong Liu sh->log_start = MaxSector;
1856b4c625c6SSong Liu for (i = sh->disks; i--; )
1857b4c625c6SSong Liu sh->dev[i].flags = 0;
1858b4c625c6SSong Liu }
1859b4c625c6SSong Liu
1860b4c625c6SSong Liu static void
1861b4c625c6SSong Liu r5l_recovery_replay_one_stripe(struct r5conf *conf,
1862b4c625c6SSong Liu struct stripe_head *sh,
1863b4c625c6SSong Liu struct r5l_recovery_ctx *ctx)
1864b4c625c6SSong Liu {
1865b4c625c6SSong Liu struct md_rdev *rdev, *rrdev;
1866b4c625c6SSong Liu int disk_index;
1867b4c625c6SSong Liu int data_count = 0;
1868b4c625c6SSong Liu
1869b4c625c6SSong Liu for (disk_index = 0; disk_index < sh->disks; disk_index++) {
1870b4c625c6SSong Liu if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
1871b4c625c6SSong Liu continue;
1872b4c625c6SSong Liu if (disk_index == sh->qd_idx || disk_index == sh->pd_idx)
1873b4c625c6SSong Liu continue;
1874b4c625c6SSong Liu data_count++;
1875b4c625c6SSong Liu }
1876b4c625c6SSong Liu
1877b4c625c6SSong Liu /*
1878b4c625c6SSong Liu * stripes that only have parity must have been flushed
1879b4c625c6SSong Liu * before the crash that we are now recovering from, so
1880b4c625c6SSong Liu 	 * there is nothing more to recover.
1881b4c625c6SSong Liu */
1882b4c625c6SSong Liu if (data_count == 0)
1883b4c625c6SSong Liu goto out;
1884b4c625c6SSong Liu
1885b4c625c6SSong Liu for (disk_index = 0; disk_index < sh->disks; disk_index++) {
1886b4c625c6SSong Liu if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
1887b4c625c6SSong Liu continue;
1888b4c625c6SSong Liu
1889b4c625c6SSong Liu /* in case device is broken */
1890b4c625c6SSong Liu rcu_read_lock();
1891b4c625c6SSong Liu rdev = rcu_dereference(conf->disks[disk_index].rdev);
1892b4c625c6SSong Liu if (rdev) {
1893b4c625c6SSong Liu atomic_inc(&rdev->nr_pending);
1894b4c625c6SSong Liu rcu_read_unlock();
1895b4c625c6SSong Liu sync_page_io(rdev, sh->sector, PAGE_SIZE,
18964ce4c73fSBart Van Assche sh->dev[disk_index].page, REQ_OP_WRITE,
1897b4c625c6SSong Liu false);
1898b4c625c6SSong Liu rdev_dec_pending(rdev, rdev->mddev);
1899b4c625c6SSong Liu rcu_read_lock();
1900b4c625c6SSong Liu }
1901b4c625c6SSong Liu rrdev = rcu_dereference(conf->disks[disk_index].replacement);
1902b4c625c6SSong Liu if (rrdev) {
1903b4c625c6SSong Liu atomic_inc(&rrdev->nr_pending);
1904b4c625c6SSong Liu rcu_read_unlock();
1905b4c625c6SSong Liu sync_page_io(rrdev, sh->sector, PAGE_SIZE,
19064ce4c73fSBart Van Assche sh->dev[disk_index].page, REQ_OP_WRITE,
1907b4c625c6SSong Liu false);
1908b4c625c6SSong Liu rdev_dec_pending(rrdev, rrdev->mddev);
1909b4c625c6SSong Liu rcu_read_lock();
1910b4c625c6SSong Liu }
1911b4c625c6SSong Liu rcu_read_unlock();
1912b4c625c6SSong Liu }
1913b4c625c6SSong Liu ctx->data_parity_stripes++;
1914b4c625c6SSong Liu out:
1915b4c625c6SSong Liu r5l_recovery_reset_stripe(sh);
1916b4c625c6SSong Liu }
1917b4c625c6SSong Liu
1918b4c625c6SSong Liu static struct stripe_head *
1919483cbbedSAlexei Naberezhnov r5c_recovery_alloc_stripe(
1920483cbbedSAlexei Naberezhnov struct r5conf *conf,
1921483cbbedSAlexei Naberezhnov sector_t stripe_sect,
1922483cbbedSAlexei Naberezhnov int noblock)
1923b4c625c6SSong Liu {
1924b4c625c6SSong Liu struct stripe_head *sh;
1925b4c625c6SSong Liu
19262f2d51efSLogan Gunthorpe sh = raid5_get_active_stripe(conf, NULL, stripe_sect,
19272f2d51efSLogan Gunthorpe noblock ? R5_GAS_NOBLOCK : 0);
1928b4c625c6SSong Liu if (!sh)
1929b4c625c6SSong Liu return NULL; /* no more stripe available */
1930b4c625c6SSong Liu
1931b4c625c6SSong Liu r5l_recovery_reset_stripe(sh);
1932b4c625c6SSong Liu
1933b4c625c6SSong Liu return sh;
1934b4c625c6SSong Liu }
1935b4c625c6SSong Liu
1936b4c625c6SSong Liu static struct stripe_head *
1937b4c625c6SSong Liu r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect)
1938b4c625c6SSong Liu {
1939b4c625c6SSong Liu struct stripe_head *sh;
1940b4c625c6SSong Liu
1941b4c625c6SSong Liu list_for_each_entry(sh, list, lru)
1942b4c625c6SSong Liu if (sh->sector == sect)
1943b4c625c6SSong Liu return sh;
1944b4c625c6SSong Liu return NULL;
1945b4c625c6SSong Liu }
1946b4c625c6SSong Liu
1947b4c625c6SSong Liu static void
1948b4c625c6SSong Liu r5c_recovery_drop_stripes(struct list_head *cached_stripe_list,
1949b4c625c6SSong Liu struct r5l_recovery_ctx *ctx)
1950b4c625c6SSong Liu {
1951b4c625c6SSong Liu struct stripe_head *sh, *next;
1952b4c625c6SSong Liu
1953b4c625c6SSong Liu list_for_each_entry_safe(sh, next, cached_stripe_list, lru) {
1954b4c625c6SSong Liu r5l_recovery_reset_stripe(sh);
1955b4c625c6SSong Liu list_del_init(&sh->lru);
1956b4c625c6SSong Liu raid5_release_stripe(sh);
1957b4c625c6SSong Liu }
1958b4c625c6SSong Liu }
1959b4c625c6SSong Liu
1960b4c625c6SSong Liu static void
1961b4c625c6SSong Liu r5c_recovery_replay_stripes(struct list_head *cached_stripe_list,
1962b4c625c6SSong Liu struct r5l_recovery_ctx *ctx)
1963b4c625c6SSong Liu {
1964b4c625c6SSong Liu struct stripe_head *sh, *next;
1965b4c625c6SSong Liu
1966b4c625c6SSong Liu list_for_each_entry_safe(sh, next, cached_stripe_list, lru)
1967b4c625c6SSong Liu if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
1968b4c625c6SSong Liu r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx);
1969b4c625c6SSong Liu list_del_init(&sh->lru);
1970b4c625c6SSong Liu raid5_release_stripe(sh);
1971b4c625c6SSong Liu }
1972b4c625c6SSong Liu }
1973b4c625c6SSong Liu
1974b4c625c6SSong Liu /* if matches return 0; otherwise return -EINVAL */
1975b4c625c6SSong Liu static int
1976effe6ee7SSong Liu r5l_recovery_verify_data_checksum(struct r5l_log *log,
1977effe6ee7SSong Liu struct r5l_recovery_ctx *ctx,
1978effe6ee7SSong Liu struct page *page,
1979b4c625c6SSong Liu sector_t log_offset, __le32 log_checksum)
1980b4c625c6SSong Liu {
1981b4c625c6SSong Liu void *addr;
1982b4c625c6SSong Liu u32 checksum;
1983b4c625c6SSong Liu
1984effe6ee7SSong Liu r5l_recovery_read_page(log, ctx, page, log_offset);
1985b4c625c6SSong Liu addr = kmap_atomic(page);
1986b4c625c6SSong Liu checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
1987b4c625c6SSong Liu kunmap_atomic(addr);
1988b4c625c6SSong Liu return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
1989b4c625c6SSong Liu }
1990b4c625c6SSong Liu
1991b4c625c6SSong Liu /*
1992b4c625c6SSong Liu  * before loading data to the stripe cache, we need to verify the checksum for
1993b4c625c6SSong Liu  * all data; if any data page mismatches, we drop all data in the meta block
1994b4c625c6SSong Liu */
1995b4c625c6SSong Liu static int
1996b4c625c6SSong Liu r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
1997b4c625c6SSong Liu struct r5l_recovery_ctx *ctx)
1998b4c625c6SSong Liu {
1999b4c625c6SSong Liu struct mddev *mddev = log->rdev->mddev;
2000b4c625c6SSong Liu struct r5conf *conf = mddev->private;
2001b4c625c6SSong Liu struct r5l_meta_block *mb = page_address(ctx->meta_page);
2002b4c625c6SSong Liu sector_t mb_offset = sizeof(struct r5l_meta_block);
2003b4c625c6SSong Liu sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2004b4c625c6SSong Liu struct page *page;
2005b4c625c6SSong Liu struct r5l_payload_data_parity *payload;
20062d4f4687SSong Liu struct r5l_payload_flush *payload_flush;
2007b4c625c6SSong Liu
2008b4c625c6SSong Liu page = alloc_page(GFP_KERNEL);
2009b4c625c6SSong Liu if (!page)
2010355810d1SShaohua Li return -ENOMEM;
2011355810d1SShaohua Li
2012b4c625c6SSong Liu while (mb_offset < le32_to_cpu(mb->meta_size)) {
2013b4c625c6SSong Liu payload = (void *)mb + mb_offset;
20142d4f4687SSong Liu payload_flush = (void *)mb + mb_offset;
2015b4c625c6SSong Liu
20161ad45a9bSJason Yan if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
2017b4c625c6SSong Liu if (r5l_recovery_verify_data_checksum(
2018effe6ee7SSong Liu log, ctx, page, log_offset,
2019b4c625c6SSong Liu payload->checksum[0]) < 0)
2020b4c625c6SSong Liu goto mismatch;
20211ad45a9bSJason Yan } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) {
2022b4c625c6SSong Liu if (r5l_recovery_verify_data_checksum(
2023effe6ee7SSong Liu log, ctx, page, log_offset,
2024b4c625c6SSong Liu payload->checksum[0]) < 0)
2025b4c625c6SSong Liu goto mismatch;
2026b4c625c6SSong Liu if (conf->max_degraded == 2 && /* q for RAID 6 */
2027b4c625c6SSong Liu r5l_recovery_verify_data_checksum(
2028effe6ee7SSong Liu log, ctx, page,
2029b4c625c6SSong Liu r5l_ring_add(log, log_offset,
2030b4c625c6SSong Liu BLOCK_SECTORS),
2031b4c625c6SSong Liu payload->checksum[1]) < 0)
2032b4c625c6SSong Liu goto mismatch;
20331ad45a9bSJason Yan } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
20342d4f4687SSong Liu /* nothing to do for R5LOG_PAYLOAD_FLUSH here */
20352d4f4687SSong Liu } else /* not R5LOG_PAYLOAD_DATA/PARITY/FLUSH */
2036b4c625c6SSong Liu goto mismatch;
2037b4c625c6SSong Liu
20381ad45a9bSJason Yan if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
20392d4f4687SSong Liu mb_offset += sizeof(struct r5l_payload_flush) +
20402d4f4687SSong Liu le32_to_cpu(payload_flush->size);
20412d4f4687SSong Liu } else {
20422d4f4687SSong Liu /* DATA or PARITY payload */
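			/*
			 * payload->size is in sectors; the payload header is
			 * followed by one __le32 checksum per page, hence
			 * size >> (PAGE_SHIFT - 9) checksum entries
			 */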
2043b4c625c6SSong Liu log_offset = r5l_ring_add(log, log_offset,
2044b4c625c6SSong Liu le32_to_cpu(payload->size));
2045b4c625c6SSong Liu mb_offset += sizeof(struct r5l_payload_data_parity) +
2046b4c625c6SSong Liu sizeof(__le32) *
2047b4c625c6SSong Liu (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
2048b4c625c6SSong Liu }
2049b4c625c6SSong Liu
20502d4f4687SSong Liu }
20512d4f4687SSong Liu
2052b4c625c6SSong Liu put_page(page);
2053b4c625c6SSong Liu return 0;
2054b4c625c6SSong Liu
2055b4c625c6SSong Liu mismatch:
2056b4c625c6SSong Liu put_page(page);
2057b4c625c6SSong Liu return -EINVAL;
2058b4c625c6SSong Liu }
2059b4c625c6SSong Liu
2060b4c625c6SSong Liu /*
2061b4c625c6SSong Liu * Analyze all data/parity pages in one meta block
2062b4c625c6SSong Liu * Returns:
2063b4c625c6SSong Liu * 0 for success
2064b4c625c6SSong Liu  *	-EINVAL for unknown payload type
2065b4c625c6SSong Liu  *	-EAGAIN for checksum mismatch of data page
2066b4c625c6SSong Liu  *	-ENOMEM for running out of memory (alloc_page failed or ran out of stripes)
2067b4c625c6SSong Liu */
2068b4c625c6SSong Liu static int
2069b4c625c6SSong Liu r5c_recovery_analyze_meta_block(struct r5l_log *log,
2070b4c625c6SSong Liu struct r5l_recovery_ctx *ctx,
2071b4c625c6SSong Liu struct list_head *cached_stripe_list)
2072b4c625c6SSong Liu {
2073b4c625c6SSong Liu struct mddev *mddev = log->rdev->mddev;
2074b4c625c6SSong Liu struct r5conf *conf = mddev->private;
2075b4c625c6SSong Liu struct r5l_meta_block *mb;
2076b4c625c6SSong Liu struct r5l_payload_data_parity *payload;
20772d4f4687SSong Liu struct r5l_payload_flush *payload_flush;
2078b4c625c6SSong Liu int mb_offset;
2079b4c625c6SSong Liu sector_t log_offset;
2080b4c625c6SSong Liu sector_t stripe_sect;
2081b4c625c6SSong Liu struct stripe_head *sh;
2082b4c625c6SSong Liu int ret;
2083b4c625c6SSong Liu
2084b4c625c6SSong Liu /*
2085b4c625c6SSong Liu 	 * on a mismatch in data blocks, we will drop all data in this mb, but
2086b4c625c6SSong Liu 	 * we will still read the next mb for other data with the FLUSH flag, as
2087b4c625c6SSong Liu * io_unit could finish out of order.
2088b4c625c6SSong Liu */
2089b4c625c6SSong Liu ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
2090b4c625c6SSong Liu if (ret == -EINVAL)
2091b4c625c6SSong Liu return -EAGAIN;
2092b4c625c6SSong Liu else if (ret)
2093b4c625c6SSong Liu 		return ret; /* -ENOMEM due to alloc_page() failure */
2094b4c625c6SSong Liu
2095b4c625c6SSong Liu mb = page_address(ctx->meta_page);
2096b4c625c6SSong Liu mb_offset = sizeof(struct r5l_meta_block);
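	/* data/parity pages are laid out one block (the meta block) past ctx->pos */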
2097b4c625c6SSong Liu log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2098b4c625c6SSong Liu
2099b4c625c6SSong Liu while (mb_offset < le32_to_cpu(mb->meta_size)) {
2100b4c625c6SSong Liu int dd;
2101b4c625c6SSong Liu
2102b4c625c6SSong Liu payload = (void *)mb + mb_offset;
21032d4f4687SSong Liu payload_flush = (void *)mb + mb_offset;
21042d4f4687SSong Liu
21051ad45a9bSJason Yan if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
21062d4f4687SSong Liu int i, count;
21072d4f4687SSong Liu
21082d4f4687SSong Liu count = le32_to_cpu(payload_flush->size) / sizeof(__le64);
21092d4f4687SSong Liu for (i = 0; i < count; ++i) {
21102d4f4687SSong Liu stripe_sect = le64_to_cpu(payload_flush->flush_stripes[i]);
21112d4f4687SSong Liu sh = r5c_recovery_lookup_stripe(cached_stripe_list,
21122d4f4687SSong Liu stripe_sect);
21132d4f4687SSong Liu if (sh) {
21142d4f4687SSong Liu WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
21152d4f4687SSong Liu r5l_recovery_reset_stripe(sh);
21162d4f4687SSong Liu list_del_init(&sh->lru);
21172d4f4687SSong Liu raid5_release_stripe(sh);
21182d4f4687SSong Liu }
21192d4f4687SSong Liu }
21202d4f4687SSong Liu
21212d4f4687SSong Liu mb_offset += sizeof(struct r5l_payload_flush) +
21222d4f4687SSong Liu le32_to_cpu(payload_flush->size);
21232d4f4687SSong Liu continue;
21242d4f4687SSong Liu }
21252d4f4687SSong Liu
21262d4f4687SSong Liu /* DATA or PARITY payload */
21271ad45a9bSJason Yan stripe_sect = (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) ?
2128b4c625c6SSong Liu raid5_compute_sector(
2129b4c625c6SSong Liu conf, le64_to_cpu(payload->location), 0, &dd,
2130b4c625c6SSong Liu NULL)
2131b4c625c6SSong Liu : le64_to_cpu(payload->location);
2132b4c625c6SSong Liu
2133b4c625c6SSong Liu sh = r5c_recovery_lookup_stripe(cached_stripe_list,
2134b4c625c6SSong Liu stripe_sect);
2135b4c625c6SSong Liu
2136b4c625c6SSong Liu if (!sh) {
2137483cbbedSAlexei Naberezhnov sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
2138b4c625c6SSong Liu /*
2139b4c625c6SSong Liu * cannot get stripe from raid5_get_active_stripe
2140b4c625c6SSong Liu 			 * try replaying some stripes
2141b4c625c6SSong Liu */
2142b4c625c6SSong Liu if (!sh) {
2143b4c625c6SSong Liu r5c_recovery_replay_stripes(
2144b4c625c6SSong Liu cached_stripe_list, ctx);
2145b4c625c6SSong Liu sh = r5c_recovery_alloc_stripe(
2146483cbbedSAlexei Naberezhnov conf, stripe_sect, 1);
2147b4c625c6SSong Liu }
2148b4c625c6SSong Liu if (!sh) {
2149483cbbedSAlexei Naberezhnov int new_size = conf->min_nr_stripes * 2;
2150b4c625c6SSong Liu 				pr_debug("md/raid:%s: Increasing stripe cache size to %d to recover data on journal.\n",
2151b4c625c6SSong Liu mdname(mddev),
2152483cbbedSAlexei Naberezhnov new_size);
2153483cbbedSAlexei Naberezhnov ret = raid5_set_cache_size(mddev, new_size);
2154483cbbedSAlexei Naberezhnov if (conf->min_nr_stripes <= new_size / 2) {
2155483cbbedSAlexei Naberezhnov pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
2156483cbbedSAlexei Naberezhnov mdname(mddev),
2157483cbbedSAlexei Naberezhnov ret,
2158483cbbedSAlexei Naberezhnov new_size,
2159483cbbedSAlexei Naberezhnov conf->min_nr_stripes,
2160483cbbedSAlexei Naberezhnov conf->max_nr_stripes);
2161483cbbedSAlexei Naberezhnov return -ENOMEM;
2162483cbbedSAlexei Naberezhnov }
2163483cbbedSAlexei Naberezhnov sh = r5c_recovery_alloc_stripe(
2164483cbbedSAlexei Naberezhnov conf, stripe_sect, 0);
2165b4c625c6SSong Liu }
2166b4c625c6SSong Liu if (!sh) {
2167b4c625c6SSong Liu pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
2168b4c625c6SSong Liu mdname(mddev));
2169b4c625c6SSong Liu return -ENOMEM;
2170b4c625c6SSong Liu }
2171b4c625c6SSong Liu list_add_tail(&sh->lru, cached_stripe_list);
2172b4c625c6SSong Liu }
2173b4c625c6SSong Liu
21741ad45a9bSJason Yan if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
2175f7b7bee7SZhengyuan Liu if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
2176f7b7bee7SZhengyuan Liu test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
2177b4c625c6SSong Liu r5l_recovery_replay_one_stripe(conf, sh, ctx);
2178b4c625c6SSong Liu list_move_tail(&sh->lru, cached_stripe_list);
2179b4c625c6SSong Liu }
2180b4c625c6SSong Liu r5l_recovery_load_data(log, sh, ctx, payload,
2181b4c625c6SSong Liu log_offset);
21821ad45a9bSJason Yan } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
2183b4c625c6SSong Liu r5l_recovery_load_parity(log, sh, ctx, payload,
2184b4c625c6SSong Liu log_offset);
2185b4c625c6SSong Liu else
2186b4c625c6SSong Liu return -EINVAL;
2187b4c625c6SSong Liu
2188b4c625c6SSong Liu log_offset = r5l_ring_add(log, log_offset,
2189b4c625c6SSong Liu le32_to_cpu(payload->size));
2190b4c625c6SSong Liu
2191b4c625c6SSong Liu mb_offset += sizeof(struct r5l_payload_data_parity) +
2192b4c625c6SSong Liu sizeof(__le32) *
2193b4c625c6SSong Liu (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
2194b4c625c6SSong Liu }
2195b4c625c6SSong Liu
2196b4c625c6SSong Liu return 0;
2197b4c625c6SSong Liu }
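/*
 * For reference, the on-disk layout walked above, as a sketch (struct
 * sizes per md_p.h): the 4K meta page holds the r5l_meta_block header
 * followed by payload descriptors, and each data/parity descriptor is
 * trailed by one __le32 checksum per 4K block it covers; the blocks
 * themselves sit after the meta block on the log device, which is why
 * mb_offset and log_offset advance independently.
 *
 *  meta page:  | r5l_meta_block | payload 0 | csums | payload 1 | ...
 *  log device: | meta block | data/parity blocks ... | next meta  ...
 */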
2198b4c625c6SSong Liu
2199b4c625c6SSong Liu /*
2200b4c625c6SSong Liu * Load the stripe into cache. The stripe will be written out later by
2201b4c625c6SSong Liu * the stripe cache state machine.
2202b4c625c6SSong Liu */
2203b4c625c6SSong Liu static void r5c_recovery_load_one_stripe(struct r5l_log *log,
2204b4c625c6SSong Liu struct stripe_head *sh)
2205b4c625c6SSong Liu {
2206b4c625c6SSong Liu struct r5dev *dev;
2207b4c625c6SSong Liu int i;
2208b4c625c6SSong Liu
2209b4c625c6SSong Liu for (i = sh->disks; i--; ) {
2210b4c625c6SSong Liu dev = sh->dev + i;
2211b4c625c6SSong Liu if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) {
2212b4c625c6SSong Liu set_bit(R5_InJournal, &dev->flags);
2213b4c625c6SSong Liu set_bit(R5_UPTODATE, &dev->flags);
2214b4c625c6SSong Liu }
2215b4c625c6SSong Liu }
2216b4c625c6SSong Liu }
2217b4c625c6SSong Liu
2218b4c625c6SSong Liu /*
2219b4c625c6SSong Liu * Scan through the log for all to-be-flushed data
2220b4c625c6SSong Liu *
2221b4c625c6SSong Liu * For stripes with data and parity, namely Data-Parity stripe
2222b4c625c6SSong Liu * (STRIPE_R5C_CACHING == 0), we simply replay all the writes.
2223b4c625c6SSong Liu *
2224b4c625c6SSong Liu * For stripes with only data, namely Data-Only stripe
2225b4c625c6SSong Liu * (STRIPE_R5C_CACHING == 1), we load them to stripe cache state machine.
2226b4c625c6SSong Liu *
2227b4c625c6SSong Liu * For a stripe, if we see data after parity, we should discard all previous
2228b4c625c6SSong Liu  * data and parity for this stripe, as that data has already been
2229b4c625c6SSong Liu  * flushed to the array.
2230b4c625c6SSong Liu *
2231b4c625c6SSong Liu  * At the end of the scan, we return the new journal_tail, which points to the
2232b4c625c6SSong Liu  * first data-only stripe on the journal device, or to the next invalid meta block.
2233b4c625c6SSong Liu */
2234b4c625c6SSong Liu static int r5c_recovery_flush_log(struct r5l_log *log,
2235b4c625c6SSong Liu struct r5l_recovery_ctx *ctx)
2236b4c625c6SSong Liu {
2237bc8f167fSJackieLiu struct stripe_head *sh;
2238b4c625c6SSong Liu int ret = 0;
2239b4c625c6SSong Liu
2240b4c625c6SSong Liu /* scan through the log */
2241b4c625c6SSong Liu while (1) {
2242b4c625c6SSong Liu if (r5l_recovery_read_meta_block(log, ctx))
2243b4c625c6SSong Liu break;
2244b4c625c6SSong Liu
2245b4c625c6SSong Liu ret = r5c_recovery_analyze_meta_block(log, ctx,
2246b4c625c6SSong Liu &ctx->cached_list);
2247b4c625c6SSong Liu /*
2248b4c625c6SSong Liu 		 * -EAGAIN means a mismatch in a data block; in this case, we
2249b4c625c6SSong Liu 		 * still try to scan the next meta block
2250b4c625c6SSong Liu */
2251b4c625c6SSong Liu if (ret && ret != -EAGAIN)
2252b4c625c6SSong Liu break; /* ret == -EINVAL or -ENOMEM */
2253b4c625c6SSong Liu ctx->seq++;
2254b4c625c6SSong Liu ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
2255b4c625c6SSong Liu }
2256b4c625c6SSong Liu
2257b4c625c6SSong Liu if (ret == -ENOMEM) {
2258b4c625c6SSong Liu r5c_recovery_drop_stripes(&ctx->cached_list, ctx);
2259b4c625c6SSong Liu return ret;
2260b4c625c6SSong Liu }
2261b4c625c6SSong Liu
2262b4c625c6SSong Liu /* replay data-parity stripes */
2263b4c625c6SSong Liu r5c_recovery_replay_stripes(&ctx->cached_list, ctx);
2264b4c625c6SSong Liu
2265b4c625c6SSong Liu /* load data-only stripes to stripe cache */
2266bc8f167fSJackieLiu list_for_each_entry(sh, &ctx->cached_list, lru) {
2267b4c625c6SSong Liu WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
2268b4c625c6SSong Liu r5c_recovery_load_one_stripe(log, sh);
2269b4c625c6SSong Liu ctx->data_only_stripes++;
2270b4c625c6SSong Liu }
2271b4c625c6SSong Liu
2272b4c625c6SSong Liu return 0;
2273b4c625c6SSong Liu }
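/*
 * Note: when r5c_recovery_flush_log() returns 0, ctx->cached_list holds
 * only data-only stripes (STRIPE_R5C_CACHING set); every stripe that had
 * parity was replayed to the RAID disks and dropped. This is what lets
 * the new journal_tail land on the first data-only stripe.
 */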
2274355810d1SShaohua Li
2275355810d1SShaohua Li /*
2276355810d1SShaohua Li  * We did a recovery. Now ctx.pos points to an invalid meta block. The new
2277355810d1SShaohua Li  * log will start here, but we can't let the superblock point to the last
2278355810d1SShaohua Li  * valid meta block. The log might look like:
2279355810d1SShaohua Li  * | meta 1| meta 2| meta 3|
2280355810d1SShaohua Li  * meta 1 is valid, meta 2 is invalid, and meta 3 could be valid. If the
2281355810d1SShaohua Li  * superblock points to meta 1, we write a new valid meta 2n. If a crash
2282355810d1SShaohua Li  * happens again, the new recovery will start from meta 1. Since meta 2n is
2283355810d1SShaohua Li  * valid now, recovery will think meta 3 is valid, which is wrong.
2284355810d1SShaohua Li  * The solution is to create a new meta in meta 2 with its seq == meta
22853c6edc66SSong Liu  * 1's seq + 10000 and let the superblock point to meta 2. Then recovery
22863c6edc66SSong Liu  * will not treat meta 3 as a valid meta block, because its seq doesn't match.
2287355810d1SShaohua Li */
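/*
 * A minimal sketch of why the large seq gap works, relying on the
 * validity check in r5l_recovery_read_meta_block(): a meta block is
 * accepted only when its seq equals the seq recovery expects next
 * (le64_to_cpu(mb->seq) == ctx->seq), so the stale "meta 3", whose seq
 * continues the old numbering, fails the comparison once numbering
 * restarts at seq + 10000.
 */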
2288355810d1SShaohua Li
2289b4c625c6SSong Liu /*
2290b4c625c6SSong Liu * Before recovery, the log looks like the following
2291b4c625c6SSong Liu *
2292b4c625c6SSong Liu * ---------------------------------------------
2293b4c625c6SSong Liu * | valid log | invalid log |
2294b4c625c6SSong Liu * ---------------------------------------------
2295b4c625c6SSong Liu * ^
2296b4c625c6SSong Liu * |- log->last_checkpoint
2297b4c625c6SSong Liu * |- log->last_cp_seq
2298b4c625c6SSong Liu *
2299b4c625c6SSong Liu * Now we scan through the log until we see invalid entry
2300b4c625c6SSong Liu *
2301b4c625c6SSong Liu * ---------------------------------------------
2302b4c625c6SSong Liu * | valid log | invalid log |
2303b4c625c6SSong Liu * ---------------------------------------------
2304b4c625c6SSong Liu * ^ ^
2305b4c625c6SSong Liu * |- log->last_checkpoint |- ctx->pos
2306b4c625c6SSong Liu * |- log->last_cp_seq |- ctx->seq
2307b4c625c6SSong Liu *
2308b4c625c6SSong Liu  * From this point, we need to increase the seq number by 10000 to avoid
2309b4c625c6SSong Liu  * confusing the next recovery.
2310b4c625c6SSong Liu *
2311b4c625c6SSong Liu * ---------------------------------------------
2312b4c625c6SSong Liu * | valid log | invalid log |
2313b4c625c6SSong Liu * ---------------------------------------------
2314b4c625c6SSong Liu * ^ ^
2315b4c625c6SSong Liu * |- log->last_checkpoint |- ctx->pos+1
23163c6edc66SSong Liu * |- log->last_cp_seq |- ctx->seq+10001
2317b4c625c6SSong Liu *
2318b4c625c6SSong Liu  * However, it is not safe to start the state machine yet, because the data
2319b4c625c6SSong Liu  * of data-only stripes is not yet secured in RAID. To secure this data, we
2320b4c625c6SSong Liu  * rewrite it to the journal starting from seq+10001.
2321b4c625c6SSong Liu *
2322b4c625c6SSong Liu * -----------------------------------------------------------------
2323b4c625c6SSong Liu * | valid log | data only stripes | invalid log |
2324b4c625c6SSong Liu * -----------------------------------------------------------------
2325b4c625c6SSong Liu * ^ ^
2326b4c625c6SSong Liu * |- log->last_checkpoint |- ctx->pos+n
23273c6edc66SSong Liu * |- log->last_cp_seq |- ctx->seq+10000+n
2328b4c625c6SSong Liu *
2329b4c625c6SSong Liu  * If a failure happens again during this process, recovery can safely start
2330b4c625c6SSong Liu  * again from log->last_checkpoint.
2331b4c625c6SSong Liu *
2332b4c625c6SSong Liu * Once data only stripes are rewritten to journal, we move log_tail
2333b4c625c6SSong Liu *
2334b4c625c6SSong Liu * -----------------------------------------------------------------
2335b4c625c6SSong Liu * | old log | data only stripes | invalid log |
2336b4c625c6SSong Liu * -----------------------------------------------------------------
2337b4c625c6SSong Liu * ^ ^
2338b4c625c6SSong Liu * |- log->last_checkpoint |- ctx->pos+n
23393c6edc66SSong Liu * |- log->last_cp_seq |- ctx->seq+10000+n
2340b4c625c6SSong Liu *
2341b4c625c6SSong Liu * Then we can safely start the state machine. If failure happens from this
2342b4c625c6SSong Liu  * point on, the recovery will start from the new log->last_checkpoint.
2343b4c625c6SSong Liu */
2344b4c625c6SSong Liu static int
2345b4c625c6SSong Liu r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
2346b4c625c6SSong Liu struct r5l_recovery_ctx *ctx)
2347b4c625c6SSong Liu {
2348a85dd7b8SSong Liu struct stripe_head *sh;
2349b4c625c6SSong Liu struct mddev *mddev = log->rdev->mddev;
2350b4c625c6SSong Liu struct page *page;
23513c66abbaSSong Liu sector_t next_checkpoint = MaxSector;
2352b4c625c6SSong Liu
2353b4c625c6SSong Liu page = alloc_page(GFP_KERNEL);
2354b4c625c6SSong Liu if (!page) {
2355b4c625c6SSong Liu pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n",
2356b4c625c6SSong Liu mdname(mddev));
2357b4c625c6SSong Liu return -ENOMEM;
2358b4c625c6SSong Liu }
2359b4c625c6SSong Liu
23603c66abbaSSong Liu WARN_ON(list_empty(&ctx->cached_list));
23613c66abbaSSong Liu
2362a85dd7b8SSong Liu list_for_each_entry(sh, &ctx->cached_list, lru) {
2363b4c625c6SSong Liu struct r5l_meta_block *mb;
2364b4c625c6SSong Liu int i;
2365b4c625c6SSong Liu int offset;
2366b4c625c6SSong Liu sector_t write_pos;
2367b4c625c6SSong Liu
2368b4c625c6SSong Liu WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
2369b4c625c6SSong Liu r5l_recovery_create_empty_meta_block(log, page,
2370b4c625c6SSong Liu ctx->pos, ctx->seq);
2371b4c625c6SSong Liu mb = page_address(page);
2372b4c625c6SSong Liu offset = le32_to_cpu(mb->meta_size);
2373fc833c2aSJackieLiu write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2374b4c625c6SSong Liu
2375b4c625c6SSong Liu for (i = sh->disks; i--; ) {
2376b4c625c6SSong Liu struct r5dev *dev = &sh->dev[i];
2377b4c625c6SSong Liu struct r5l_payload_data_parity *payload;
2378b4c625c6SSong Liu void *addr;
2379b4c625c6SSong Liu
2380b4c625c6SSong Liu if (test_bit(R5_InJournal, &dev->flags)) {
2381b4c625c6SSong Liu payload = (void *)mb + offset;
2382b4c625c6SSong Liu payload->header.type = cpu_to_le16(
2383b4c625c6SSong Liu R5LOG_PAYLOAD_DATA);
23841ad45a9bSJason Yan payload->size = cpu_to_le32(BLOCK_SECTORS);
2385b4c625c6SSong Liu payload->location = cpu_to_le64(
2386b4c625c6SSong Liu raid5_compute_blocknr(sh, i, 0));
2387b4c625c6SSong Liu addr = kmap_atomic(dev->page);
2388b4c625c6SSong Liu payload->checksum[0] = cpu_to_le32(
2389b4c625c6SSong Liu crc32c_le(log->uuid_checksum, addr,
2390b4c625c6SSong Liu PAGE_SIZE));
2391b4c625c6SSong Liu kunmap_atomic(addr);
2392b4c625c6SSong Liu sync_page_io(log->rdev, write_pos, PAGE_SIZE,
23934ce4c73fSBart Van Assche dev->page, REQ_OP_WRITE, false);
2394b4c625c6SSong Liu write_pos = r5l_ring_add(log, write_pos,
2395b4c625c6SSong Liu BLOCK_SECTORS);
2396b4c625c6SSong Liu offset += sizeof(__le32) +
2397b4c625c6SSong Liu sizeof(struct r5l_payload_data_parity);
2398b4c625c6SSong Liu
2399b4c625c6SSong Liu }
2400b4c625c6SSong Liu }
2401b4c625c6SSong Liu mb->meta_size = cpu_to_le32(offset);
24025c88f403SSong Liu mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
24035c88f403SSong Liu mb, PAGE_SIZE));
2404b4c625c6SSong Liu sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
24054ce4c73fSBart Van Assche REQ_OP_WRITE | REQ_SYNC | REQ_FUA, false);
2406b4c625c6SSong Liu sh->log_start = ctx->pos;
24073c66abbaSSong Liu list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
24083c66abbaSSong Liu atomic_inc(&log->stripe_in_journal_count);
2409b4c625c6SSong Liu ctx->pos = write_pos;
2410b4c625c6SSong Liu ctx->seq += 1;
24113c66abbaSSong Liu next_checkpoint = sh->log_start;
2412b4c625c6SSong Liu }
24133c66abbaSSong Liu log->next_checkpoint = next_checkpoint;
2414b4c625c6SSong Liu __free_page(page);
2415b4c625c6SSong Liu return 0;
2416b4c625c6SSong Liu }
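/*
 * For reference, the read side of the checksums written above, as a
 * sketch (the real check lives in the recovery read path): each data
 * page must satisfy
 *
 *	payload->checksum[0] ==
 *		cpu_to_le32(crc32c_le(log->uuid_checksum, addr, PAGE_SIZE));
 *
 * Seeding crc32c with log->uuid_checksum ties every block to this
 * array's journal, so blocks left over from a different array never
 * verify.
 */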
2417b4c625c6SSong Liu
2418a85dd7b8SSong Liu static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
2419a85dd7b8SSong Liu struct r5l_recovery_ctx *ctx)
2420a85dd7b8SSong Liu {
2421a85dd7b8SSong Liu struct mddev *mddev = log->rdev->mddev;
2422a85dd7b8SSong Liu struct r5conf *conf = mddev->private;
2423a85dd7b8SSong Liu struct stripe_head *sh, *next;
2424c9020e64SSong Liu bool cleared_pending = false;
2425a85dd7b8SSong Liu
2426a85dd7b8SSong Liu if (ctx->data_only_stripes == 0)
2427a85dd7b8SSong Liu return;
2428a85dd7b8SSong Liu
2429c9020e64SSong Liu if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2430c9020e64SSong Liu cleared_pending = true;
2431c9020e64SSong Liu clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
2432c9020e64SSong Liu }
2433a85dd7b8SSong Liu log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;
2434a85dd7b8SSong Liu
2435a85dd7b8SSong Liu list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
2436a85dd7b8SSong Liu r5c_make_stripe_write_out(sh);
2437a85dd7b8SSong Liu set_bit(STRIPE_HANDLE, &sh->state);
2438a85dd7b8SSong Liu list_del_init(&sh->lru);
2439a85dd7b8SSong Liu raid5_release_stripe(sh);
2440a85dd7b8SSong Liu }
2441a85dd7b8SSong Liu
2442a85dd7b8SSong Liu /* reuse conf->wait_for_quiescent in recovery */
2443a85dd7b8SSong Liu wait_event(conf->wait_for_quiescent,
2444a85dd7b8SSong Liu atomic_read(&conf->active_stripes) == 0);
2445a85dd7b8SSong Liu
2446a85dd7b8SSong Liu log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
2447c9020e64SSong Liu if (cleared_pending)
2448c9020e64SSong Liu set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
2449a85dd7b8SSong Liu }
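/*
 * A note on the MD_SB_CHANGE_PENDING dance above: stripe handling is
 * deferred while a superblock update is pending, so the bit is cleared
 * for the duration of the flush (otherwise the stripes released above
 * could never drain) and restored afterwards.
 */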
2450a85dd7b8SSong Liu
2451f6bed0efSShaohua Li static int r5l_recovery_log(struct r5l_log *log)
2452f6bed0efSShaohua Li {
24535aabf7c4SSong Liu struct mddev *mddev = log->rdev->mddev;
2454effe6ee7SSong Liu struct r5l_recovery_ctx *ctx;
24555aabf7c4SSong Liu int ret;
245643b96748SJackieLiu sector_t pos;
2457355810d1SShaohua Li
2458effe6ee7SSong Liu ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2459effe6ee7SSong Liu if (!ctx)
2460355810d1SShaohua Li return -ENOMEM;
2461355810d1SShaohua Li
2462effe6ee7SSong Liu ctx->pos = log->last_checkpoint;
2463effe6ee7SSong Liu ctx->seq = log->last_cp_seq;
2464effe6ee7SSong Liu INIT_LIST_HEAD(&ctx->cached_list);
2465effe6ee7SSong Liu ctx->meta_page = alloc_page(GFP_KERNEL);
2466effe6ee7SSong Liu
2467effe6ee7SSong Liu if (!ctx->meta_page) {
2468effe6ee7SSong Liu ret = -ENOMEM;
2469effe6ee7SSong Liu goto meta_page;
2470effe6ee7SSong Liu }
2471effe6ee7SSong Liu
2472effe6ee7SSong Liu if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) {
2473effe6ee7SSong Liu ret = -ENOMEM;
2474effe6ee7SSong Liu goto ra_pool;
2475effe6ee7SSong Liu }
2476effe6ee7SSong Liu
2477effe6ee7SSong Liu ret = r5c_recovery_flush_log(log, ctx);
2478355810d1SShaohua Li
2479355810d1SShaohua Li if (ret)
2480effe6ee7SSong Liu goto error;
24815aabf7c4SSong Liu
2482effe6ee7SSong Liu pos = ctx->pos;
2483effe6ee7SSong Liu ctx->seq += 10000;
248443b96748SJackieLiu
2485effe6ee7SSong Liu if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0))
248692e6245dSSong Liu pr_info("md/raid:%s: starting from clean shutdown\n",
24875aabf7c4SSong Liu mdname(mddev));
2488a85dd7b8SSong Liu else
248992e6245dSSong Liu pr_info("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
2490effe6ee7SSong Liu mdname(mddev), ctx->data_only_stripes,
2491effe6ee7SSong Liu ctx->data_parity_stripes);
24925aabf7c4SSong Liu
2493effe6ee7SSong Liu if (ctx->data_only_stripes == 0) {
2494effe6ee7SSong Liu log->next_checkpoint = ctx->pos;
2495effe6ee7SSong Liu r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++);
2496effe6ee7SSong Liu ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2497effe6ee7SSong Liu } else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) {
24985aabf7c4SSong Liu pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
24995aabf7c4SSong Liu mdname(mddev));
2500effe6ee7SSong Liu ret = -EIO;
2501effe6ee7SSong Liu goto error;
25025aabf7c4SSong Liu }
25035aabf7c4SSong Liu
2504effe6ee7SSong Liu log->log_start = ctx->pos;
2505effe6ee7SSong Liu log->seq = ctx->seq;
250643b96748SJackieLiu log->last_checkpoint = pos;
250743b96748SJackieLiu r5l_write_super(log, pos);
2508a85dd7b8SSong Liu
2509effe6ee7SSong Liu r5c_recovery_flush_data_only_stripes(log, ctx);
2510effe6ee7SSong Liu ret = 0;
2511effe6ee7SSong Liu error:
2512effe6ee7SSong Liu r5l_recovery_free_ra_pool(log, ctx);
2513effe6ee7SSong Liu ra_pool:
2514effe6ee7SSong Liu __free_page(ctx->meta_page);
2515effe6ee7SSong Liu meta_page:
2516effe6ee7SSong Liu kfree(ctx);
2517effe6ee7SSong Liu return ret;
2518f6bed0efSShaohua Li }
2519f6bed0efSShaohua Li
2520f6bed0efSShaohua Li static void r5l_write_super(struct r5l_log *log, sector_t cp)
2521f6bed0efSShaohua Li {
2522f6bed0efSShaohua Li struct mddev *mddev = log->rdev->mddev;
2523f6bed0efSShaohua Li
2524f6bed0efSShaohua Li log->rdev->journal_tail = cp;
25252953079cSShaohua Li set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2526f6bed0efSShaohua Li }
2527f6bed0efSShaohua Li
25282c7da14bSSong Liu static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
25292c7da14bSSong Liu {
2530a72cbf83SSong Liu struct r5conf *conf;
25312c7da14bSSong Liu int ret;
25322c7da14bSSong Liu
253378ede6a0SLogan Gunthorpe ret = mddev_lock(mddev);
253478ede6a0SLogan Gunthorpe if (ret)
253578ede6a0SLogan Gunthorpe return ret;
253678ede6a0SLogan Gunthorpe
2537a72cbf83SSong Liu conf = mddev->private;
253878ede6a0SLogan Gunthorpe if (!conf || !conf->log)
253978ede6a0SLogan Gunthorpe goto out_unlock;
25402c7da14bSSong Liu
25412c7da14bSSong Liu switch (conf->log->r5c_journal_mode) {
25422c7da14bSSong Liu case R5C_JOURNAL_MODE_WRITE_THROUGH:
25432c7da14bSSong Liu ret = snprintf(
25442c7da14bSSong Liu page, PAGE_SIZE, "[%s] %s\n",
25452c7da14bSSong Liu r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
25462c7da14bSSong Liu r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
25472c7da14bSSong Liu break;
25482c7da14bSSong Liu case R5C_JOURNAL_MODE_WRITE_BACK:
25492c7da14bSSong Liu ret = snprintf(
25502c7da14bSSong Liu page, PAGE_SIZE, "%s [%s]\n",
25512c7da14bSSong Liu r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
25522c7da14bSSong Liu r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
25532c7da14bSSong Liu break;
25542c7da14bSSong Liu default:
25552c7da14bSSong Liu ret = 0;
25562c7da14bSSong Liu }
255778ede6a0SLogan Gunthorpe
255878ede6a0SLogan Gunthorpe out_unlock:
255978ede6a0SLogan Gunthorpe mddev_unlock(mddev);
25602c7da14bSSong Liu return ret;
25612c7da14bSSong Liu }
25622c7da14bSSong Liu
256378e470c2SHeinz Mauelshagen /*
256478e470c2SHeinz Mauelshagen * Set journal cache mode on @mddev (external API initially needed by dm-raid).
256578e470c2SHeinz Mauelshagen *
256678e470c2SHeinz Mauelshagen * @mode as defined in 'enum r5c_journal_mode'.
256778e470c2SHeinz Mauelshagen *
256878e470c2SHeinz Mauelshagen */
256978e470c2SHeinz Mauelshagen int r5c_journal_mode_set(struct mddev *mddev, int mode)
25702c7da14bSSong Liu {
2571b44886c5SSong Liu struct r5conf *conf;
25722c7da14bSSong Liu
257378e470c2SHeinz Mauelshagen if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
257478e470c2SHeinz Mauelshagen mode > R5C_JOURNAL_MODE_WRITE_BACK)
25752c7da14bSSong Liu return -EINVAL;
25762c7da14bSSong Liu
2577b44886c5SSong Liu conf = mddev->private;
2578ff35f58eSSong Liu if (!conf || !conf->log)
2579b44886c5SSong Liu return -ENODEV;
2580b44886c5SSong Liu
25812e38a37fSSong Liu if (raid5_calc_degraded(conf) > 0 &&
2582ff35f58eSSong Liu mode == R5C_JOURNAL_MODE_WRITE_BACK)
25832e38a37fSSong Liu return -EINVAL;
25842e38a37fSSong Liu
25852c7da14bSSong Liu mddev_suspend(mddev);
258678e470c2SHeinz Mauelshagen conf->log->r5c_journal_mode = mode;
25872c7da14bSSong Liu mddev_resume(mddev);
25882c7da14bSSong Liu
25892c7da14bSSong Liu pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
259078e470c2SHeinz Mauelshagen mdname(mddev), mode, r5c_journal_mode_str[mode]);
259178e470c2SHeinz Mauelshagen return 0;
259278e470c2SHeinz Mauelshagen }
259378e470c2SHeinz Mauelshagen EXPORT_SYMBOL(r5c_journal_mode_set);
259478e470c2SHeinz Mauelshagen
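/*
 * Example use of the exported helper, a sketch (dm-raid is the
 * expected external caller):
 *
 *	err = r5c_journal_mode_set(mddev, R5C_JOURNAL_MODE_WRITE_BACK);
 *
 * Returns -EINVAL for an out-of-range mode or when switching a
 * degraded array to write-back, and -ENODEV when there is no journal.
 * The sysfs store path below wraps the call in mddev_lock()/
 * mddev_unlock().
 */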
259578e470c2SHeinz Mauelshagen static ssize_t r5c_journal_mode_store(struct mddev *mddev,
259678e470c2SHeinz Mauelshagen const char *page, size_t length)
259778e470c2SHeinz Mauelshagen {
259878e470c2SHeinz Mauelshagen int mode = ARRAY_SIZE(r5c_journal_mode_str);
259978e470c2SHeinz Mauelshagen size_t len = length;
2600ff35f58eSSong Liu int ret;
260178e470c2SHeinz Mauelshagen
260278e470c2SHeinz Mauelshagen if (len < 2)
260378e470c2SHeinz Mauelshagen return -EINVAL;
260478e470c2SHeinz Mauelshagen
260578e470c2SHeinz Mauelshagen if (page[len - 1] == '\n')
260678e470c2SHeinz Mauelshagen len--;
260778e470c2SHeinz Mauelshagen
260878e470c2SHeinz Mauelshagen while (mode--)
260978e470c2SHeinz Mauelshagen if (strlen(r5c_journal_mode_str[mode]) == len &&
261078e470c2SHeinz Mauelshagen !strncmp(page, r5c_journal_mode_str[mode], len))
261178e470c2SHeinz Mauelshagen break;
2612ff35f58eSSong Liu ret = mddev_lock(mddev);
2613ff35f58eSSong Liu if (ret)
2614ff35f58eSSong Liu return ret;
2615ff35f58eSSong Liu ret = r5c_journal_mode_set(mddev, mode);
2616ff35f58eSSong Liu mddev_unlock(mddev);
2617ff35f58eSSong Liu return ret ?: length;
26182c7da14bSSong Liu }
26192c7da14bSSong Liu
26202c7da14bSSong Liu struct md_sysfs_entry
26212c7da14bSSong Liu r5c_journal_mode = __ATTR(journal_mode, 0644,
26222c7da14bSSong Liu r5c_journal_mode_show, r5c_journal_mode_store);
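/*
 * Sysfs usage, for reference (assuming the usual md sysfs layout for
 * an array named md0):
 *
 *	# cat /sys/block/md0/md/journal_mode
 *	[write-through] write-back
 *	# echo write-back > /sys/block/md0/md/journal_mode
 */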
26232c7da14bSSong Liu
26242ded3703SSong Liu /*
26252ded3703SSong Liu  * Try to handle a write operation in the caching phase. This function
26262ded3703SSong Liu  * should only be called in write-back mode.
26272ded3703SSong Liu  *
26282ded3703SSong Liu  * If all outstanding writes can be handled in the caching phase, returns 0.
26292ded3703SSong Liu  * If the writes require the write-out phase, calls r5c_make_stripe_write_out()
26302ded3703SSong Liu  * and returns -EAGAIN.
26312ded3703SSong Liu */
26322ded3703SSong Liu int r5c_try_caching_write(struct r5conf *conf,
26332ded3703SSong Liu struct stripe_head *sh,
26342ded3703SSong Liu struct stripe_head_state *s,
26352ded3703SSong Liu int disks)
26362ded3703SSong Liu {
2637*ebf6f517SYu Kuai struct r5l_log *log = READ_ONCE(conf->log);
26381e6d690bSSong Liu int i;
26391e6d690bSSong Liu struct r5dev *dev;
26401e6d690bSSong Liu int to_cache = 0;
26416f28c5c3SLogan Gunthorpe void __rcu **pslot;
264203b047f4SSong Liu sector_t tree_index;
264303b047f4SSong Liu int ret;
264403b047f4SSong Liu uintptr_t refcount;
26452ded3703SSong Liu
26462ded3703SSong Liu BUG_ON(!r5c_is_writeback(log));
26472ded3703SSong Liu
26481e6d690bSSong Liu if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
26491e6d690bSSong Liu /*
26501e6d690bSSong Liu * There are two different scenarios here:
26511e6d690bSSong Liu * 1. The stripe has some data cached, and it is sent to
26521e6d690bSSong Liu * write-out phase for reclaim
26531e6d690bSSong Liu * 2. The stripe is clean, and this is the first write
26541e6d690bSSong Liu *
26551e6d690bSSong Liu * For 1, return -EAGAIN, so we continue with
26561e6d690bSSong Liu * handle_stripe_dirtying().
26571e6d690bSSong Liu *
26581e6d690bSSong Liu * For 2, set STRIPE_R5C_CACHING and continue with caching
26591e6d690bSSong Liu * write.
26601e6d690bSSong Liu */
26611e6d690bSSong Liu
26621e6d690bSSong Liu 		/* case 1: anything in s->injournal or s->written */
26631e6d690bSSong Liu if (s->injournal > 0 || s->written > 0)
26641e6d690bSSong Liu return -EAGAIN;
26651e6d690bSSong Liu /* case 2 */
26661e6d690bSSong Liu set_bit(STRIPE_R5C_CACHING, &sh->state);
26671e6d690bSSong Liu }
26681e6d690bSSong Liu
26692e38a37fSSong Liu /*
26702e38a37fSSong Liu 	 * When run in degraded mode, the array is set to write-through mode.
26712e38a37fSSong Liu 	 * This check helps drain pending writes safely in the transition to
26722e38a37fSSong Liu 	 * write-through mode.
26735ddf0440SSong Liu *
26745ddf0440SSong Liu * When a stripe is syncing, the write is also handled in write
26755ddf0440SSong Liu * through mode.
26762e38a37fSSong Liu */
26775ddf0440SSong Liu if (s->failed || test_bit(STRIPE_SYNCING, &sh->state)) {
26782e38a37fSSong Liu r5c_make_stripe_write_out(sh);
26792e38a37fSSong Liu return -EAGAIN;
26802e38a37fSSong Liu }
26812e38a37fSSong Liu
26821e6d690bSSong Liu for (i = disks; i--; ) {
26831e6d690bSSong Liu dev = &sh->dev[i];
26841e6d690bSSong Liu /* if non-overwrite, use writing-out phase */
26851e6d690bSSong Liu if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) &&
26861e6d690bSSong Liu !test_bit(R5_InJournal, &dev->flags)) {
26872ded3703SSong Liu r5c_make_stripe_write_out(sh);
26882ded3703SSong Liu return -EAGAIN;
26892ded3703SSong Liu }
26901e6d690bSSong Liu }
26911e6d690bSSong Liu
269203b047f4SSong Liu /* if the stripe is not counted in big_stripe_tree, add it now */
269303b047f4SSong Liu if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) &&
269403b047f4SSong Liu !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
269503b047f4SSong Liu tree_index = r5c_tree_index(conf, sh->sector);
269603b047f4SSong Liu spin_lock(&log->tree_lock);
269703b047f4SSong Liu pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
269803b047f4SSong Liu tree_index);
269903b047f4SSong Liu if (pslot) {
270003b047f4SSong Liu refcount = (uintptr_t)radix_tree_deref_slot_protected(
270103b047f4SSong Liu pslot, &log->tree_lock) >>
270203b047f4SSong Liu R5C_RADIX_COUNT_SHIFT;
270303b047f4SSong Liu radix_tree_replace_slot(
270403b047f4SSong Liu &log->big_stripe_tree, pslot,
270503b047f4SSong Liu (void *)((refcount + 1) << R5C_RADIX_COUNT_SHIFT));
270603b047f4SSong Liu } else {
270703b047f4SSong Liu /*
270803b047f4SSong Liu * this radix_tree_insert can fail safely, so no
270903b047f4SSong Liu * need to call radix_tree_preload()
271003b047f4SSong Liu */
271103b047f4SSong Liu ret = radix_tree_insert(
271203b047f4SSong Liu &log->big_stripe_tree, tree_index,
271303b047f4SSong Liu (void *)(1 << R5C_RADIX_COUNT_SHIFT));
271403b047f4SSong Liu if (ret) {
271503b047f4SSong Liu spin_unlock(&log->tree_lock);
271603b047f4SSong Liu r5c_make_stripe_write_out(sh);
271703b047f4SSong Liu return -EAGAIN;
271803b047f4SSong Liu }
271903b047f4SSong Liu }
272003b047f4SSong Liu spin_unlock(&log->tree_lock);
272103b047f4SSong Liu
272203b047f4SSong Liu /*
272303b047f4SSong Liu 		 * set STRIPE_R5C_PARTIAL_STRIPE to show that the stripe is
272403b047f4SSong Liu 		 * counted in the radix tree
272503b047f4SSong Liu */
272603b047f4SSong Liu set_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state);
272703b047f4SSong Liu atomic_inc(&conf->r5c_cached_partial_stripes);
272803b047f4SSong Liu }
272903b047f4SSong Liu
27301e6d690bSSong Liu for (i = disks; i--; ) {
27311e6d690bSSong Liu dev = &sh->dev[i];
27321e6d690bSSong Liu if (dev->towrite) {
27331e6d690bSSong Liu set_bit(R5_Wantwrite, &dev->flags);
27341e6d690bSSong Liu set_bit(R5_Wantdrain, &dev->flags);
27351e6d690bSSong Liu set_bit(R5_LOCKED, &dev->flags);
27361e6d690bSSong Liu to_cache++;
27371e6d690bSSong Liu }
27381e6d690bSSong Liu }
27391e6d690bSSong Liu
27401e6d690bSSong Liu if (to_cache) {
27411e6d690bSSong Liu set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
27421e6d690bSSong Liu /*
27431e6d690bSSong Liu * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data()
27441e6d690bSSong Liu * in ops_run_io(). STRIPE_LOG_TRAPPED will be cleared in
27451e6d690bSSong Liu * r5c_handle_data_cached()
27461e6d690bSSong Liu */
27471e6d690bSSong Liu set_bit(STRIPE_LOG_TRAPPED, &sh->state);
27481e6d690bSSong Liu }
27491e6d690bSSong Liu
27501e6d690bSSong Liu return 0;
27511e6d690bSSong Liu }
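/*
 * A sketch of the big_stripe_tree slot encoding used above: each slot
 * stores a counter rather than a pointer, shifted up by
 * R5C_RADIX_COUNT_SHIFT so that the low bits the radix tree reserves
 * for internal/exceptional entries stay clear:
 *
 *	slot     = (void *)(refcount << R5C_RADIX_COUNT_SHIFT);
 *	refcount = (uintptr_t)slot >> R5C_RADIX_COUNT_SHIFT;
 */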
27521e6d690bSSong Liu
27531e6d690bSSong Liu /*
27541e6d690bSSong Liu * free extra pages (orig_page) we allocated for prexor
27551e6d690bSSong Liu */
27561e6d690bSSong Liu void r5c_release_extra_page(struct stripe_head *sh)
27571e6d690bSSong Liu {
2758d7bd398eSSong Liu struct r5conf *conf = sh->raid_conf;
27591e6d690bSSong Liu int i;
2760d7bd398eSSong Liu bool using_disk_info_extra_page;
2761d7bd398eSSong Liu
2762d7bd398eSSong Liu using_disk_info_extra_page =
2763d7bd398eSSong Liu sh->dev[0].orig_page == conf->disks[0].extra_page;
27641e6d690bSSong Liu
27651e6d690bSSong Liu for (i = sh->disks; i--; )
27661e6d690bSSong Liu if (sh->dev[i].page != sh->dev[i].orig_page) {
27671e6d690bSSong Liu struct page *p = sh->dev[i].orig_page;
27681e6d690bSSong Liu
27691e6d690bSSong Liu sh->dev[i].orig_page = sh->dev[i].page;
277086aa1397SSong Liu clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
277186aa1397SSong Liu
2772d7bd398eSSong Liu if (!using_disk_info_extra_page)
27731e6d690bSSong Liu put_page(p);
27741e6d690bSSong Liu }
2775d7bd398eSSong Liu
2776d7bd398eSSong Liu if (using_disk_info_extra_page) {
2777d7bd398eSSong Liu clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state);
2778d7bd398eSSong Liu md_wakeup_thread(conf->mddev->thread);
2779d7bd398eSSong Liu }
2780d7bd398eSSong Liu }
2781d7bd398eSSong Liu
2782d7bd398eSSong Liu void r5c_use_extra_page(struct stripe_head *sh)
2783d7bd398eSSong Liu {
2784d7bd398eSSong Liu struct r5conf *conf = sh->raid_conf;
2785d7bd398eSSong Liu int i;
2786d7bd398eSSong Liu struct r5dev *dev;
2787d7bd398eSSong Liu
2788d7bd398eSSong Liu for (i = sh->disks; i--; ) {
2789d7bd398eSSong Liu dev = &sh->dev[i];
2790d7bd398eSSong Liu if (dev->orig_page != dev->page)
2791d7bd398eSSong Liu put_page(dev->orig_page);
2792d7bd398eSSong Liu dev->orig_page = conf->disks[i].extra_page;
2793d7bd398eSSong Liu }
27941e6d690bSSong Liu }
27952ded3703SSong Liu
27962ded3703SSong Liu /*
27972ded3703SSong Liu * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the
27982ded3703SSong Liu * stripe is committed to RAID disks.
27992ded3703SSong Liu */
28002ded3703SSong Liu void r5c_finish_stripe_write_out(struct r5conf *conf,
28012ded3703SSong Liu struct stripe_head *sh,
28022ded3703SSong Liu struct stripe_head_state *s)
28032ded3703SSong Liu {
2804*ebf6f517SYu Kuai struct r5l_log *log = READ_ONCE(conf->log);
28051e6d690bSSong Liu int i;
28061e6d690bSSong Liu int do_wakeup = 0;
280703b047f4SSong Liu sector_t tree_index;
28086f28c5c3SLogan Gunthorpe void __rcu **pslot;
280903b047f4SSong Liu uintptr_t refcount;
28101e6d690bSSong Liu
281103b047f4SSong Liu if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
28122ded3703SSong Liu return;
28132ded3703SSong Liu
28142ded3703SSong Liu WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
28152ded3703SSong Liu clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
28162ded3703SSong Liu
281703b047f4SSong Liu if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
28182ded3703SSong Liu return;
28191e6d690bSSong Liu
28201e6d690bSSong Liu for (i = sh->disks; i--; ) {
28211e6d690bSSong Liu clear_bit(R5_InJournal, &sh->dev[i].flags);
28221e6d690bSSong Liu if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
28231e6d690bSSong Liu do_wakeup = 1;
28241e6d690bSSong Liu }
28251e6d690bSSong Liu
28261e6d690bSSong Liu /*
28271e6d690bSSong Liu 	 * analyse_stripe() runs before r5c_finish_stripe_write_out().
28281e6d690bSSong Liu 	 * We updated R5_InJournal, so we also update s->injournal.
28291e6d690bSSong Liu */
28301e6d690bSSong Liu s->injournal = 0;
28311e6d690bSSong Liu
28321e6d690bSSong Liu if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
28331e6d690bSSong Liu if (atomic_dec_and_test(&conf->pending_full_writes))
28341e6d690bSSong Liu md_wakeup_thread(conf->mddev->thread);
28351e6d690bSSong Liu
28361e6d690bSSong Liu if (do_wakeup)
28371e6d690bSSong Liu wake_up(&conf->wait_for_overlap);
2838a39f7afdSSong Liu
283903b047f4SSong Liu spin_lock_irq(&log->stripe_in_journal_lock);
2840a39f7afdSSong Liu list_del_init(&sh->r5c);
284103b047f4SSong Liu spin_unlock_irq(&log->stripe_in_journal_lock);
2842a39f7afdSSong Liu sh->log_start = MaxSector;
284303b047f4SSong Liu
284403b047f4SSong Liu atomic_dec(&log->stripe_in_journal_count);
284503b047f4SSong Liu r5c_update_log_state(log);
284603b047f4SSong Liu
284703b047f4SSong Liu /* stop counting this stripe in big_stripe_tree */
284803b047f4SSong Liu if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) ||
284903b047f4SSong Liu test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
285003b047f4SSong Liu tree_index = r5c_tree_index(conf, sh->sector);
285103b047f4SSong Liu spin_lock(&log->tree_lock);
285203b047f4SSong Liu pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
285303b047f4SSong Liu tree_index);
285403b047f4SSong Liu BUG_ON(pslot == NULL);
285503b047f4SSong Liu refcount = (uintptr_t)radix_tree_deref_slot_protected(
285603b047f4SSong Liu pslot, &log->tree_lock) >>
285703b047f4SSong Liu R5C_RADIX_COUNT_SHIFT;
285803b047f4SSong Liu if (refcount == 1)
285903b047f4SSong Liu radix_tree_delete(&log->big_stripe_tree, tree_index);
286003b047f4SSong Liu else
286103b047f4SSong Liu radix_tree_replace_slot(
286203b047f4SSong Liu &log->big_stripe_tree, pslot,
286303b047f4SSong Liu (void *)((refcount - 1) << R5C_RADIX_COUNT_SHIFT));
286403b047f4SSong Liu spin_unlock(&log->tree_lock);
286503b047f4SSong Liu }
286603b047f4SSong Liu
286703b047f4SSong Liu if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) {
286803b047f4SSong Liu BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0);
2869e33fbb9cSShaohua Li atomic_dec(&conf->r5c_flushing_partial_stripes);
287003b047f4SSong Liu atomic_dec(&conf->r5c_cached_partial_stripes);
287103b047f4SSong Liu }
287203b047f4SSong Liu
287303b047f4SSong Liu if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
287403b047f4SSong Liu BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0);
2875e33fbb9cSShaohua Li atomic_dec(&conf->r5c_flushing_full_stripes);
287603b047f4SSong Liu atomic_dec(&conf->r5c_cached_full_stripes);
287703b047f4SSong Liu }
2878ea17481fSSong Liu
2879ea17481fSSong Liu r5l_append_flush_payload(log, sh->sector);
28805ddf0440SSong Liu 	/* stripe is flushed to raid disks; we can do resync now */
28815ddf0440SSong Liu if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
28825ddf0440SSong Liu set_bit(STRIPE_HANDLE, &sh->state);
28831e6d690bSSong Liu }
28841e6d690bSSong Liu
2885ff875738SArtur Paszkiewicz int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
28861e6d690bSSong Liu {
2887a39f7afdSSong Liu struct r5conf *conf = sh->raid_conf;
28881e6d690bSSong Liu int pages = 0;
28891e6d690bSSong Liu int reserve;
28901e6d690bSSong Liu int i;
28911e6d690bSSong Liu int ret = 0;
28921e6d690bSSong Liu
28931e6d690bSSong Liu BUG_ON(!log);
28941e6d690bSSong Liu
28951e6d690bSSong Liu for (i = 0; i < sh->disks; i++) {
28961e6d690bSSong Liu void *addr;
28971e6d690bSSong Liu
28981e6d690bSSong Liu if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
28991e6d690bSSong Liu continue;
29001e6d690bSSong Liu addr = kmap_atomic(sh->dev[i].page);
29011e6d690bSSong Liu sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
29021e6d690bSSong Liu addr, PAGE_SIZE);
29031e6d690bSSong Liu kunmap_atomic(addr);
29041e6d690bSSong Liu pages++;
29051e6d690bSSong Liu }
29061e6d690bSSong Liu WARN_ON(pages == 0);
29071e6d690bSSong Liu
29081e6d690bSSong Liu /*
29091e6d690bSSong Liu 	 * The stripe must enter the state machine again to call endio, so
29101e6d690bSSong Liu * don't delay.
29111e6d690bSSong Liu */
29121e6d690bSSong Liu clear_bit(STRIPE_DELAYED, &sh->state);
29131e6d690bSSong Liu atomic_inc(&sh->count);
29141e6d690bSSong Liu
29151e6d690bSSong Liu mutex_lock(&log->io_mutex);
29161e6d690bSSong Liu /* meta + data */
29171e6d690bSSong Liu reserve = (1 + pages) << (PAGE_SHIFT - 9);
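	/*
	 * With 4K pages (enforced at init), this reserves 8 sectors for
	 * the meta block plus 8 sectors per data page to be logged.
	 */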
29181e6d690bSSong Liu
2919a39f7afdSSong Liu if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
2920a39f7afdSSong Liu sh->log_start == MaxSector)
2921a39f7afdSSong Liu r5l_add_no_space_stripe(log, sh);
2922a39f7afdSSong Liu else if (!r5l_has_free_space(log, reserve)) {
2923a39f7afdSSong Liu if (sh->log_start == log->last_checkpoint)
2924a39f7afdSSong Liu BUG();
2925a39f7afdSSong Liu else
2926a39f7afdSSong Liu r5l_add_no_space_stripe(log, sh);
29271e6d690bSSong Liu } else {
29281e6d690bSSong Liu ret = r5l_log_stripe(log, sh, pages, 0);
29291e6d690bSSong Liu if (ret) {
29301e6d690bSSong Liu spin_lock_irq(&log->io_list_lock);
29311e6d690bSSong Liu list_add_tail(&sh->log_list, &log->no_mem_stripes);
29321e6d690bSSong Liu spin_unlock_irq(&log->io_list_lock);
29331e6d690bSSong Liu }
29341e6d690bSSong Liu }
29351e6d690bSSong Liu
29361e6d690bSSong Liu mutex_unlock(&log->io_mutex);
29371e6d690bSSong Liu return 0;
2938f6bed0efSShaohua Li }
2939f6bed0efSShaohua Li
294003b047f4SSong Liu /* check whether this big stripe is in the write-back cache. */
294103b047f4SSong Liu bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect)
294203b047f4SSong Liu {
2943*ebf6f517SYu Kuai struct r5l_log *log = READ_ONCE(conf->log);
294403b047f4SSong Liu sector_t tree_index;
294503b047f4SSong Liu void *slot;
294603b047f4SSong Liu
294703b047f4SSong Liu if (!log)
294803b047f4SSong Liu return false;
294903b047f4SSong Liu
295003b047f4SSong Liu WARN_ON_ONCE(!rcu_read_lock_held());
295103b047f4SSong Liu tree_index = r5c_tree_index(conf, sect);
295203b047f4SSong Liu slot = radix_tree_lookup(&log->big_stripe_tree, tree_index);
295303b047f4SSong Liu return slot != NULL;
295403b047f4SSong Liu }
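/*
 * Example call pattern, a sketch matching the WARN_ON_ONCE above:
 *
 *	rcu_read_lock();
 *	cached = r5c_big_stripe_cached(conf, sect);
 *	rcu_read_unlock();
 */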
295503b047f4SSong Liu
2956f6bed0efSShaohua Li static int r5l_load_log(struct r5l_log *log)
2957f6bed0efSShaohua Li {
2958f6bed0efSShaohua Li struct md_rdev *rdev = log->rdev;
2959f6bed0efSShaohua Li struct page *page;
2960f6bed0efSShaohua Li struct r5l_meta_block *mb;
2961f6bed0efSShaohua Li sector_t cp = log->rdev->journal_tail;
2962f6bed0efSShaohua Li u32 stored_crc, expected_crc;
2963f6bed0efSShaohua Li bool create_super = false;
2964d30dfeb9SJackieLiu int ret = 0;
2965f6bed0efSShaohua Li
2966f6bed0efSShaohua Li /* Make sure it's valid */
2967f6bed0efSShaohua Li if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
2968f6bed0efSShaohua Li cp = 0;
2969f6bed0efSShaohua Li page = alloc_page(GFP_KERNEL);
2970f6bed0efSShaohua Li if (!page)
2971f6bed0efSShaohua Li return -ENOMEM;
2972f6bed0efSShaohua Li
29734ce4c73fSBart Van Assche if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, false)) {
2974f6bed0efSShaohua Li ret = -EIO;
2975f6bed0efSShaohua Li goto ioerr;
2976f6bed0efSShaohua Li }
2977f6bed0efSShaohua Li mb = page_address(page);
2978f6bed0efSShaohua Li
2979f6bed0efSShaohua Li if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
2980f6bed0efSShaohua Li mb->version != R5LOG_VERSION) {
2981f6bed0efSShaohua Li create_super = true;
2982f6bed0efSShaohua Li goto create;
2983f6bed0efSShaohua Li }
2984f6bed0efSShaohua Li stored_crc = le32_to_cpu(mb->checksum);
2985f6bed0efSShaohua Li mb->checksum = 0;
29865cb2fbd6SShaohua Li expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
2987f6bed0efSShaohua Li if (stored_crc != expected_crc) {
2988f6bed0efSShaohua Li create_super = true;
2989f6bed0efSShaohua Li goto create;
2990f6bed0efSShaohua Li }
2991f6bed0efSShaohua Li if (le64_to_cpu(mb->position) != cp) {
2992f6bed0efSShaohua Li create_super = true;
2993f6bed0efSShaohua Li goto create;
2994f6bed0efSShaohua Li }
2995f6bed0efSShaohua Li create:
2996f6bed0efSShaohua Li if (create_super) {
2997a251c17aSJason A. Donenfeld log->last_cp_seq = get_random_u32();
2998f6bed0efSShaohua Li cp = 0;
299956056c2eSZhengyuan Liu r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
3000f6bed0efSShaohua Li /*
3001f6bed0efSShaohua Li 		 * Make sure the super points to the correct address. The log
3002f6bed0efSShaohua Li 		 * might have data very soon. If the super doesn't have the
3003f6bed0efSShaohua Li 		 * correct log tail address, recovery can't find the log.
3004f6bed0efSShaohua Li */
3005f6bed0efSShaohua Li r5l_write_super(log, cp);
3006f6bed0efSShaohua Li } else
3007f6bed0efSShaohua Li log->last_cp_seq = le64_to_cpu(mb->seq);
3008f6bed0efSShaohua Li
3009f6bed0efSShaohua Li log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
30100576b1c6SShaohua Li log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
30110576b1c6SShaohua Li if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
30120576b1c6SShaohua Li log->max_free_space = RECLAIM_MAX_FREE_SPACE;
3013f6bed0efSShaohua Li log->last_checkpoint = cp;
3014f6bed0efSShaohua Li
3015f6bed0efSShaohua Li __free_page(page);
3016f6bed0efSShaohua Li
3017d30dfeb9SJackieLiu if (create_super) {
3018d30dfeb9SJackieLiu log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
3019d30dfeb9SJackieLiu log->seq = log->last_cp_seq + 1;
3020d30dfeb9SJackieLiu log->next_checkpoint = cp;
3021d30dfeb9SJackieLiu } else
30223d7e7e1dSZhengyuan Liu ret = r5l_recovery_log(log);
3023d30dfeb9SJackieLiu
30243d7e7e1dSZhengyuan Liu r5c_update_log_state(log);
30253d7e7e1dSZhengyuan Liu return ret;
3026f6bed0efSShaohua Li ioerr:
3027f6bed0efSShaohua Li __free_page(page);
3028f6bed0efSShaohua Li return ret;
3029f6bed0efSShaohua Li }
3030f6bed0efSShaohua Li
3031d5d885fdSSong Liu int r5l_start(struct r5l_log *log)
3032d5d885fdSSong Liu {
3033d5d885fdSSong Liu int ret;
3034d5d885fdSSong Liu
3035d5d885fdSSong Liu if (!log)
3036d5d885fdSSong Liu return 0;
3037d5d885fdSSong Liu
3038d5d885fdSSong Liu ret = r5l_load_log(log);
3039d5d885fdSSong Liu if (ret) {
3040d5d885fdSSong Liu struct mddev *mddev = log->rdev->mddev;
3041d5d885fdSSong Liu struct r5conf *conf = mddev->private;
3042d5d885fdSSong Liu
3043d5d885fdSSong Liu r5l_exit_log(conf);
3044d5d885fdSSong Liu }
3045d5d885fdSSong Liu return ret;
3046d5d885fdSSong Liu }
3047d5d885fdSSong Liu
304870d466f7SSong Liu void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev)
30492e38a37fSSong Liu {
30502e38a37fSSong Liu struct r5conf *conf = mddev->private;
3051*ebf6f517SYu Kuai struct r5l_log *log = READ_ONCE(conf->log);
30522e38a37fSSong Liu
30532e38a37fSSong Liu if (!log)
30542e38a37fSSong Liu return;
30552e38a37fSSong Liu
305670d466f7SSong Liu if ((raid5_calc_degraded(conf) > 0 ||
305770d466f7SSong Liu test_bit(Journal, &rdev->flags)) &&
3058*ebf6f517SYu Kuai log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
30592e38a37fSSong Liu schedule_work(&log->disable_writeback_work);
30602e38a37fSSong Liu }
30612e38a37fSSong Liu
3062f6bed0efSShaohua Li int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
3063f6bed0efSShaohua Li {
3064f6bed0efSShaohua Li struct r5l_log *log;
306544693154SYu Kuai struct md_thread *thread;
3066afeee514SKent Overstreet int ret;
3067ff875738SArtur Paszkiewicz
3068913cce5aSChristoph Hellwig pr_debug("md/raid:%s: using device %pg as journal\n",
3069913cce5aSChristoph Hellwig mdname(conf->mddev), rdev->bdev);
3070f6bed0efSShaohua Li
3071f6bed0efSShaohua Li if (PAGE_SIZE != 4096)
3072f6bed0efSShaohua Li return -EINVAL;
3073c757ec95SSong Liu
3074c757ec95SSong Liu /*
3075c757ec95SSong Liu * The PAGE_SIZE must be big enough to hold 1 r5l_meta_block and
3076c757ec95SSong Liu * raid_disks r5l_payload_data_parity.
3077c757ec95SSong Liu *
3078c757ec95SSong Liu 	 * The write journal and cache do not work for very big arrays
3079c757ec95SSong Liu 	 * (raid_disks > 203).
3080c757ec95SSong Liu */
3081c757ec95SSong Liu if (sizeof(struct r5l_meta_block) +
3082c757ec95SSong Liu ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) *
3083c757ec95SSong Liu conf->raid_disks) > PAGE_SIZE) {
3084c757ec95SSong Liu pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n",
3085c757ec95SSong Liu mdname(conf->mddev), conf->raid_disks);
3086c757ec95SSong Liu return -EINVAL;
3087c757ec95SSong Liu }
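	/*
	 * Worked out for 4K pages, assuming the md_p.h struct sizes
	 * (32-byte r5l_meta_block; 16-byte r5l_payload_data_parity plus
	 * one 4-byte checksum per payload): (4096 - 32) / 20 = 203
	 * disks at most, hence the limit above.
	 */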
3088c757ec95SSong Liu
3089f6bed0efSShaohua Li log = kzalloc(sizeof(*log), GFP_KERNEL);
3090f6bed0efSShaohua Li if (!log)
3091f6bed0efSShaohua Li return -ENOMEM;
3092f6bed0efSShaohua Li log->rdev = rdev;
3093ad831a16SChristoph Hellwig log->need_cache_flush = bdev_write_cache(rdev->bdev);
30945cb2fbd6SShaohua Li log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
3095f6bed0efSShaohua Li sizeof(rdev->mddev->uuid));
3096f6bed0efSShaohua Li
3097f6bed0efSShaohua Li mutex_init(&log->io_mutex);
3098f6bed0efSShaohua Li
3099f6bed0efSShaohua Li spin_lock_init(&log->io_list_lock);
3100f6bed0efSShaohua Li INIT_LIST_HEAD(&log->running_ios);
31010576b1c6SShaohua Li INIT_LIST_HEAD(&log->io_end_ios);
3102a8c34f91SShaohua Li INIT_LIST_HEAD(&log->flushing_ios);
310304732f74SChristoph Hellwig INIT_LIST_HEAD(&log->finished_ios);
3104f6bed0efSShaohua Li
3105f6bed0efSShaohua Li log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
3106f6bed0efSShaohua Li if (!log->io_kc)
3107f6bed0efSShaohua Li goto io_kc;
3108f6bed0efSShaohua Li
3109afeee514SKent Overstreet ret = mempool_init_slab_pool(&log->io_pool, R5L_POOL_SIZE, log->io_kc);
3110afeee514SKent Overstreet if (ret)
31115036c390SChristoph Hellwig goto io_pool;
31125036c390SChristoph Hellwig
3113afeee514SKent Overstreet ret = bioset_init(&log->bs, R5L_POOL_SIZE, 0, BIOSET_NEED_BVECS);
3114afeee514SKent Overstreet if (ret)
3115c38d29b3SChristoph Hellwig goto io_bs;
3116c38d29b3SChristoph Hellwig
3117afeee514SKent Overstreet ret = mempool_init_page_pool(&log->meta_pool, R5L_POOL_SIZE, 0);
3118afeee514SKent Overstreet if (ret)
3119e8deb638SChristoph Hellwig goto out_mempool;
3120e8deb638SChristoph Hellwig
312103b047f4SSong Liu spin_lock_init(&log->tree_lock);
312203b047f4SSong Liu INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN);
312303b047f4SSong Liu
312444693154SYu Kuai thread = md_register_thread(r5l_reclaim_thread, log->rdev->mddev,
312544693154SYu Kuai "reclaim");
312644693154SYu Kuai if (!thread)
31270576b1c6SShaohua Li goto reclaim_thread;
312844693154SYu Kuai
312944693154SYu Kuai thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;
313044693154SYu Kuai rcu_assign_pointer(log->reclaim_thread, thread);
3131a39f7afdSSong Liu
31320fd22b45SShaohua Li init_waitqueue_head(&log->iounit_wait);
31330576b1c6SShaohua Li
31345036c390SChristoph Hellwig INIT_LIST_HEAD(&log->no_mem_stripes);
31355036c390SChristoph Hellwig
3136f6bed0efSShaohua Li INIT_LIST_HEAD(&log->no_space_stripes);
3137f6bed0efSShaohua Li spin_lock_init(&log->no_space_stripes_lock);
3138f6bed0efSShaohua Li
31393bddb7f8SSong Liu INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
31402e38a37fSSong Liu INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);
31413bddb7f8SSong Liu
31422ded3703SSong Liu log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
3143a39f7afdSSong Liu INIT_LIST_HEAD(&log->stripe_in_journal_list);
3144a39f7afdSSong Liu spin_lock_init(&log->stripe_in_journal_lock);
3145a39f7afdSSong Liu atomic_set(&log->stripe_in_journal_count, 0);
31462ded3703SSong Liu
3147*ebf6f517SYu Kuai WRITE_ONCE(conf->log, log);
3148d2250f10SSong Liu
3149a62ab49eSShaohua Li set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
3150f6bed0efSShaohua Li return 0;
3151e8deb638SChristoph Hellwig
31520576b1c6SShaohua Li reclaim_thread:
3153afeee514SKent Overstreet mempool_exit(&log->meta_pool);
3154e8deb638SChristoph Hellwig out_mempool:
3155afeee514SKent Overstreet bioset_exit(&log->bs);
3156c38d29b3SChristoph Hellwig io_bs:
3157afeee514SKent Overstreet mempool_exit(&log->io_pool);
31585036c390SChristoph Hellwig io_pool:
3159f6bed0efSShaohua Li kmem_cache_destroy(log->io_kc);
3160f6bed0efSShaohua Li io_kc:
3161f6bed0efSShaohua Li kfree(log);
3162f6bed0efSShaohua Li return -EINVAL;
3163f6bed0efSShaohua Li }
3164f6bed0efSShaohua Li
3165ff875738SArtur Paszkiewicz void r5l_exit_log(struct r5conf *conf)
3166f6bed0efSShaohua Li {
3167ff875738SArtur Paszkiewicz struct r5l_log *log = conf->log;
3168ff875738SArtur Paszkiewicz
31697eb8ff02SLi Lingfeng md_unregister_thread(conf->mddev, &log->reclaim_thread);
3170b13015afSLogan Gunthorpe
3171a705b11bSYu Kuai /*
3172a705b11bSYu Kuai 	 * 'reconfig_mutex' is held by the caller; set 'conf->log' to NULL to
3173a705b11bSYu Kuai 	 * ensure disable_writeback_work wakes up and exits.
3174a705b11bSYu Kuai */
3175*ebf6f517SYu Kuai WRITE_ONCE(conf->log, NULL);
3176a705b11bSYu Kuai wake_up(&conf->mddev->sb_wait);
3177a705b11bSYu Kuai flush_work(&log->disable_writeback_work);
3178b13015afSLogan Gunthorpe
3179afeee514SKent Overstreet mempool_exit(&log->meta_pool);
3180afeee514SKent Overstreet bioset_exit(&log->bs);
3181afeee514SKent Overstreet mempool_exit(&log->io_pool);
3182f6bed0efSShaohua Li kmem_cache_destroy(log->io_kc);
3183f6bed0efSShaohua Li kfree(log);
3184f6bed0efSShaohua Li }