/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <trace/events/bcache.h>

/*
 * Per-key state for one copy: the data is read into the embedded bio and then
 * re-inserted through a data_insert_op.
 */
struct moving_io {
	struct closure		cl;
	struct keybuf_key	*w;
	struct data_insert_op	op;
	struct bbio		bio;
};

/*
 * A key is worth moving if any of its pointers lands in a bucket that is
 * emptier than the per-cache move threshold computed by bch_moving_gc().
 */
static bool moving_pred(struct keybuf *buf, struct bkey *k)
{
	struct cache_set *c = container_of(buf, struct cache_set,
					   moving_gc_keys);
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct cache *ca = PTR_CACHE(c, k, i);
		struct bucket *g = PTR_BUCKET(c, k, i);

		if (GC_SECTORS_USED(g) < ca->gc_move_threshold)
			return true;
	}

	return false;
}

/* Moving GC - IO loop */

static void moving_io_destructor(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	kfree(io);
}

/*
 * Final step of one copy: free the bounce pages, drop the key from the
 * moving-GC keybuf and release an in-flight slot.
 */
static void write_moving_finish(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct bio *bio = &io->bio.bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i)
		__free_page(bv->bv_page);

	if (io->op.replace_collision)
		trace_bcache_gc_copy_collision(&io->w->key);

	bch_keybuf_del(&io->op.c->moving_gc_keys, io->w);

	up(&io->op.c->moving_in_flight);

	closure_return_with_destructor(cl, moving_io_destructor);
}

static void read_moving_endio(struct bio *bio, int error)
{
	struct moving_io *io = container_of(bio->bi_private,
					    struct moving_io, cl);

	if (error)
		io->op.error = error;

	bch_bbio_endio(io->op.c, bio, error, "reading data to move");
}

/* Set up the embedded bio to cover the key's data, at idle I/O priority. */
static void moving_init(struct moving_io *io)
{
	struct bio *bio = &io->bio.bio;

	bio_init(bio);
	bio_get(bio);
	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_size		= KEY_SIZE(&io->w->key) << 9;
	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&io->w->key),
					       PAGE_SECTORS);
	bio->bi_private		= &io->cl;
	bio->bi_io_vec		= bio->bi_inline_vecs;
	bch_bio_map(bio, NULL);
}

/*
 * Rewrite the data that was just read. The insert uses replace_key, so it
 * only succeeds if the original key is still present - i.e. the data wasn't
 * overwritten while it was being moved.
 */
static void write_moving(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct data_insert_op *op = &io->op;

	if (!op->error) {
		moving_init(io);

		io->bio.bio.bi_sector = KEY_START(&io->w->key);
		op->write_prio	= 1;
		op->bio		= &io->bio.bio;

		op->writeback	= KEY_DIRTY(&io->w->key);
		op->csum	= KEY_CSUM(&io->w->key);

		bkey_copy(&op->replace_key, &io->w->key);
		op->replace	= true;

		closure_call(&op->cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, write_moving_finish, system_wq);
}

/* Submit the read; write_moving() runs from a workqueue once it completes. */
static void read_moving_submit(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct bio *bio = &io->bio.bio;

	bch_submit_bbio(bio, io->op.c, &io->w->key, 0);

	continue_at(cl, write_moving, system_wq);
}

/*
 * Main loop: scan the moving-GC keybuf for keys that match moving_pred(),
 * allocate a moving_io with enough bio_vecs for each key and kick off the
 * read, throttled by the moving_in_flight semaphore.
 */
static void read_moving(struct cache_set *c)
{
	struct keybuf_key *w;
	struct moving_io *io;
	struct bio *bio;
	struct closure cl;

	closure_init_stack(&cl);

	/* XXX: if we error, background writeback could stall indefinitely */

	while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
		w = bch_keybuf_next_rescan(c, &c->moving_gc_keys,
					   &MAX_KEY, moving_pred);
		if (!w)
			break;

		io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private	= io;
		io->w		= w;
		io->op.inode	= KEY_INODE(&w->key);
		io->op.c	= c;

		moving_init(io);
		bio = &io->bio.bio;

		bio->bi_rw	= READ;
		bio->bi_end_io	= read_moving_endio;

		if (bio_alloc_pages(bio, GFP_KERNEL))
			goto err;

		trace_bcache_gc_copy(&w->key);

		down(&c->moving_in_flight);
		closure_call(&io->cl, read_moving_submit, NULL, &cl);
	}

	if (0) {
err:		if (!IS_ERR_OR_NULL(w->private))
			kfree(w->private);

		bch_keybuf_del(&c->moving_gc_keys, w);
	}

	closure_sync(&cl);
}

/* Heap comparison: order buckets by how many live sectors they still hold. */
static bool bucket_cmp(struct bucket *l, struct bucket *r)
{
	return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
}

/* Live sectors in the bucket currently at the top of the heap. */
static unsigned bucket_heap_top(struct cache *ca)
{
	return GC_SECTORS_USED(heap_peek(&ca->heap));
}

/*
 * For each cache, build a heap of the emptiest used buckets whose combined
 * live data fits within the reserve of free buckets, and set
 * gc_move_threshold so that moving_pred() selects the data in those buckets.
 * Then run read_moving() to copy that data elsewhere, emptying the buckets.
 */
void bch_moving_gc(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;

	if (!c->copy_gc_enabled)
		return;

	mutex_lock(&c->bucket_lock);

	for_each_cache(ca, c, i) {
		unsigned sectors_to_move = 0;
		unsigned reserve_sectors = ca->sb.bucket_size *
			min(fifo_used(&ca->free), ca->free.size / 2);

		ca->heap.used = 0;

		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				continue;

			if (!heap_full(&ca->heap)) {
				sectors_to_move += GC_SECTORS_USED(b);
				heap_add(&ca->heap, b, bucket_cmp);
			} else if (bucket_cmp(b, heap_peek(&ca->heap))) {
				sectors_to_move -= bucket_heap_top(ca);
				sectors_to_move += GC_SECTORS_USED(b);

				ca->heap.data[0] = b;
				heap_sift(&ca->heap, 0, bucket_cmp);
			}
		}

		/* Drop the fullest candidates until what's left fits in the reserve. */
		while (sectors_to_move > reserve_sectors) {
			heap_pop(&ca->heap, b, bucket_cmp);
			sectors_to_move -= GC_SECTORS_USED(b);
		}

		ca->gc_move_threshold = bucket_heap_top(ca);

		pr_debug("threshold %u", ca->gc_move_threshold);
	}

	mutex_unlock(&c->bucket_lock);

	c->moving_gc_keys.last_scanned = ZERO_KEY;

	read_moving(c);
}

void bch_moving_init_cache_set(struct cache_set *c)
{
	bch_keybuf_init(&c->moving_gc_keys);
	/* Allow up to 64 moving IOs in flight at once. */
	sema_init(&c->moving_in_flight, 64);
}