request.c: diff f26e8817b235d8764363bffcc9cbfc61867371f2 -> be628be09563f8f6e81929efbd7cf3f45c344416
/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

--- 182 unchanged lines hidden (view full) ---

	bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache");
}

static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

-	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
-		set_gc_sectors(op->c);
-		wake_up_gc(op->c);
-	}
+	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
+		wake_up_gc(op->c);

	if (op->bypass)
		return bch_data_invalidate(cl);

	/*
	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
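
For context: the hunk above removes set_gc_sectors() from the write path; after this change the submitter only wakes the GC thread, and the GC thread re-arms sectors_to_gc itself before running. A rough sketch of the consuming side (gc_should_run() and bch_gc_thread() live in btree.c at the new commit; the bodies below are paraphrased, not verbatim):

/* Sketch: the GC thread now owns the counter reset that this hunk
 * drops from bch_data_insert_start().
 */
static bool gc_should_run(struct cache_set *c)
{
	/* the write path drove sectors_to_gc below zero */
	return atomic_read(&c->sectors_to_gc) < 0;
}

static int bch_gc_thread(void *arg)
{
	struct cache_set *c = arg;

	while (!kthread_should_stop()) {
		wait_event_interruptible(c->gc_wait,
				kthread_should_stop() || gc_should_run(c));
		if (kthread_should_stop())
			break;

		set_gc_sectors(c);	/* re-arm the trigger ... */
		bch_btree_gc(c);	/* ... then run a GC pass */
	}

	return 0;
}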

--- 188 unchanged lines hidden (view full) ---

		goto rescale;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	if (!congested &&
	    mode == CACHE_MODE_WRITEBACK &&
-	    op_is_write(bio_op(bio)) &&
-	    (bio->bi_opf & REQ_SYNC))
+	    op_is_write(bio->bi_opf) &&
+	    op_is_sync(bio->bi_opf))
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
		if (i->last == bio->bi_iter.bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;
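
For context: the op_is_write()/op_is_sync() hunk above switches the bypass heuristics to the helpers added with the 4.10 op/flags re-encoding; op_is_write() now takes the whole bi_opf, and the open-coded REQ_SYNC test becomes op_is_sync(), which also counts reads as sync. Roughly what the helpers in include/linux/blk_types.h look like at this point (the exact flag mask in op_is_sync() has shifted between releases, so treat this as approximate):

static inline bool op_is_write(unsigned int op)
{
	/* write-type ops have the low bit set in the op encoding */
	return (op & 1);
}

static inline bool op_is_sync(unsigned int op)
{
	/* reads are implicitly sync; writes are sync when flagged */
	return (op & REQ_OP_MASK) == REQ_OP_READ || (op & REQ_SYNC);
}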

--- 201 unchanged lines hidden (view full) ---

		s->orig_bio = NULL;
	}
}

static void do_bio_hook(struct search *s, struct bio *orig_bio)
{
	struct bio *bio = &s->bio.bio;

-	bio_init(bio);
+	bio_init(bio, NULL, 0);
	__bio_clone_fast(bio, orig_bio);
	bio->bi_end_io	= request_endio;
	bio->bi_private	= &s->cl;

	bio_cnt_set(bio, 3);
}
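
For context: bio_init() grew a three-argument form in 4.10, bio_init(bio, table, max_vecs), which wires up the inline vec table at init time. Clones that borrow another bio's vec table pass (NULL, 0), as do_bio_hook() does above right before __bio_clone_fast() points bi_io_vec at the parent's table. A minimal sketch of the two patterns; struct my_request, init_clone(), init_owned() and NR_INLINE are illustrative names, not from this file:

#define NR_INLINE 4

struct my_request {			/* hypothetical container */
	struct bio	bio;
	struct bio_vec	inline_vecs[NR_INLINE];
};

static void init_clone(struct my_request *req, struct bio *orig)
{
	/* clone path: owns no vecs, borrows orig's table */
	bio_init(&req->bio, NULL, 0);
	__bio_clone_fast(&req->bio, orig);
}

static void init_owned(struct my_request *req)
{
	/* carries its own pages: hand bio_init() the inline table */
	bio_init(&req->bio, req->inline_vecs, NR_INLINE);
}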

static void search_free(struct closure *cl)

--- 54 unchanged lines hidden (view full) ---


static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

-	if (s->iop.bio) {
-		int i;
-		struct bio_vec *bv;
-
-		bio_for_each_segment_all(bv, s->iop.bio, i)
-			__free_page(bv->bv_page);
-	}
+	if (s->iop.bio)
+		bio_free_pages(s->iop.bio);

	cached_dev_bio_complete(cl);
}

static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

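For context: bio_free_pages() was added to the block core in 4.9 as a drop-in replacement for exactly the loop this hunk deletes; from block/bio.c it is essentially:

void bio_free_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i)
		__free_page(bvec->bv_page);
}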

--- 210 unchanged lines hidden (view full) ---

	if (bio->bi_opf & REQ_PREFLUSH) {
		/* Also need to send a flush to the backing device */
		struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
						     dc->disk.bio_split);

		flush->bi_bdev	  = bio->bi_bdev;
		flush->bi_end_io  = request_endio;
		flush->bi_private = cl;
-		bio_set_op_attrs(flush, REQ_OP_WRITE, WRITE_FLUSH);
+		flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

		closure_bio_submit(flush, cl);
	}
} else {
	s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);

	closure_bio_submit(bio, cl);
}
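
For context: after the 4.10 re-encoding, the operation and its flags share the single bi_opf field, so the bio_set_op_attrs()/WRITE_FLUSH combination in the hunk above collapses into a direct assignment: an empty flush is just REQ_OP_WRITE | REQ_PREFLUSH. A hedged sketch of the pattern outside bcache (submit_empty_flush() is an illustrative name; bcache itself goes through closure_bio_submit()):

static void submit_empty_flush(struct block_device *bdev,
			       struct bio_set *bs,
			       bio_end_io_t *done, void *private)
{
	/* zero-vec bio: no data, just a cache flush to bdev.  With
	 * GFP_NOIO and a mempool-backed bioset this allocation waits
	 * rather than failing.
	 */
	struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0, bs);

	flush->bi_bdev	  = bdev;
	flush->bi_end_io  = done;
	flush->bi_private = private;
	flush->bi_opf	  = REQ_OP_WRITE | REQ_PREFLUSH;

	generic_make_request(flush);
}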

--- 225 unchanged lines hidden ---