xref: /openbmc/linux/drivers/md/dm-zoned-reclaim.c (revision ba61bb17)
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>

#define	DM_MSG_PREFIX		"zoned reclaim"

struct dmz_reclaim {
	struct dmz_metadata     *metadata;
	struct dmz_dev		*dev;

	struct delayed_work	work;
	struct workqueue_struct *wq;

	struct dm_kcopyd_client	*kc;
	struct dm_kcopyd_throttle kc_throttle;
	int			kc_err;

	unsigned long		flags;

	/* Last target access time */
	unsigned long		atime;
};

/*
 * Reclaim state flags.
 */
enum {
	DMZ_RECLAIM_KCOPY,
};

/*
 * Duration of target BIO inactivity, in jiffies, after which the
 * target is considered idle (10 seconds).
 */
#define DMZ_IDLE_PERIOD		(10UL * HZ)

/*
 * Percentage of unmapped (free) random zones below which reclaim starts
 * even if the target is busy.
 */
#define DMZ_RECLAIM_LOW_UNMAP_RND	30

/*
 * Percentage of unmapped (free) random zones above which reclaim will
 * stop if the target is busy.
 */
#define DMZ_RECLAIM_HIGH_UNMAP_RND	50
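
/*
 * For example, at or below 30% unmapped random zones, reclaim runs
 * even while the target is busy; at 50% or more unmapped, a busy
 * target is left alone. An idle target is reclaimed regardless of
 * these thresholds (see dmz_should_reclaim()).
 */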

/*
 * Align a sequential zone write pointer to chunk_block.
 */
static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
				sector_t block)
{
	struct dmz_metadata *zmd = zrc->metadata;
	sector_t wp_block = zone->wp_block;
	unsigned int nr_blocks;
	int ret;

	if (wp_block == block)
		return 0;

	if (wp_block > block)
		return -EIO;

	/*
	 * Zero out the space between the write
	 * pointer and the requested position.
	 */
	nr_blocks = block - wp_block;
	ret = blkdev_issue_zeroout(zrc->dev->bdev,
				   dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
				   dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
	if (ret) {
		dmz_dev_err(zrc->dev,
			    "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
			    dmz_id(zmd, zone), (unsigned long long)wp_block,
			    (unsigned long long)block, nr_blocks, ret);
		return ret;
	}

	zone->wp_block = block;

	return 0;
}
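
/*
 * For example, if the write pointer of the destination zone is at
 * block 100 and the next valid block to copy sits at block 150, the 50
 * blocks in between are zeroed out so that the subsequent copy lands
 * exactly at the write pointer, keeping the zone writes sequential.
 */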

/*
 * dm_kcopyd_copy end notification.
 */
static void dmz_reclaim_kcopy_end(int read_err, unsigned long write_err,
				  void *context)
{
	struct dmz_reclaim *zrc = context;

	if (read_err || write_err)
		zrc->kc_err = -EIO;
	else
		zrc->kc_err = 0;

	clear_bit_unlock(DMZ_RECLAIM_KCOPY, &zrc->flags);
	smp_mb__after_atomic();
	wake_up_bit(&zrc->flags, DMZ_RECLAIM_KCOPY);
}
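
/*
 * The clear_bit_unlock()/wake_up_bit() pair above wakes up the reclaim
 * thread sleeping in wait_on_bit_io() in dmz_reclaim_copy(), with
 * smp_mb__after_atomic() ordering the flag update against the wakeup.
 */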

/*
 * Copy valid blocks of src_zone into dst_zone.
 */
static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
			    struct dm_zone *src_zone, struct dm_zone *dst_zone)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dmz_dev *dev = zrc->dev;
	struct dm_io_region src, dst;
	sector_t block = 0, end_block;
	sector_t nr_blocks;
	sector_t src_zone_block;
	sector_t dst_zone_block;
	unsigned long flags = 0;
	int ret;

	if (dmz_is_seq(src_zone))
		end_block = src_zone->wp_block;
	else
		end_block = dev->zone_nr_blocks;
	src_zone_block = dmz_start_block(zmd, src_zone);
	dst_zone_block = dmz_start_block(zmd, dst_zone);

	if (dmz_is_seq(dst_zone))
		set_bit(DM_KCOPYD_WRITE_SEQ, &flags);

	while (block < end_block) {
		/* Get a valid region from the source zone */
		ret = dmz_first_valid_block(zmd, src_zone, &block);
		if (ret <= 0)
			return ret;
		nr_blocks = ret;

		/*
		 * If we are writing in a sequential zone, we must make sure
		 * that writes are sequential, so zero out any hole between
		 * writes.
		 */
		if (dmz_is_seq(dst_zone)) {
			ret = dmz_reclaim_align_wp(zrc, dst_zone, block);
			if (ret)
				return ret;
		}

		src.bdev = dev->bdev;
		src.sector = dmz_blk2sect(src_zone_block + block);
		src.count = dmz_blk2sect(nr_blocks);

		dst.bdev = dev->bdev;
		dst.sector = dmz_blk2sect(dst_zone_block + block);
		dst.count = src.count;

		/* Copy the valid region */
		set_bit(DMZ_RECLAIM_KCOPY, &zrc->flags);
		ret = dm_kcopyd_copy(zrc->kc, &src, 1, &dst, flags,
				     dmz_reclaim_kcopy_end, zrc);
		if (ret)
			return ret;

		/* Wait for copy to complete */
		wait_on_bit_io(&zrc->flags, DMZ_RECLAIM_KCOPY,
			       TASK_UNINTERRUPTIBLE);
		if (zrc->kc_err)
			return zrc->kc_err;

		block += nr_blocks;
		if (dmz_is_seq(dst_zone))
			dst_zone->wp_block = block;
	}

	return 0;
}
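
/*
 * As used here, dmz_first_valid_block() returns a negative errno on
 * failure, 0 when the zone has no valid blocks at or after *block, and
 * otherwise the number of contiguous valid blocks found, with *block
 * updated to the start of that region.
 */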

/*
 * Move valid blocks of dzone's buffer zone into dzone (after its write
 * pointer) and free the buffer zone.
 */
static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dm_zone *bzone = dzone->bzone;
	sector_t chunk_block = dzone->wp_block;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move buf zone %u (weight %u) to data zone %u (weight %u)",
		      dzone->chunk, dmz_id(zmd, bzone), dmz_weight(bzone),
		      dmz_id(zmd, dzone), dmz_weight(dzone));

	/* Copy the buffer zone into the data zone */
	ret = dmz_reclaim_copy(zrc, bzone, dzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, bzone, dzone, chunk_block);
	if (ret == 0) {
		/* Free the buffer zone */
		dmz_invalidate_blocks(zmd, bzone, 0, zrc->dev->zone_nr_blocks);
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, bzone);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Merge valid blocks of dzone into its buffer zone and free dzone.
 */
static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *bzone = dzone->bzone;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret = 0;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move data zone %u (weight %u) to buf zone %u (weight %u)",
		      chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
		      dmz_id(zmd, bzone), dmz_weight(bzone));

	/* Copy the data zone into the buffer zone */
	ret = dmz_reclaim_copy(zrc, dzone, bzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, dzone, bzone, 0);
	if (ret == 0) {
		/*
		 * Free the data zone and remap the chunk to
		 * the buffer zone.
		 */
		dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, bzone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Move valid blocks of the random data zone dzone into a free sequential
 * zone. Once blocks are moved, remap the chunk to the sequential zone.
 */
static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *szone = NULL;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	/* Get a free sequential zone */
	dmz_lock_map(zmd);
	szone = dmz_alloc_zone(zmd, DMZ_ALLOC_RECLAIM);
	dmz_unlock_map(zmd);
	if (!szone)
		return -ENOSPC;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move rnd zone %u (weight %u) to seq zone %u",
		      chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
		      dmz_id(zmd, szone));

	/* Copy the random data zone into the sequential zone */
	ret = dmz_reclaim_copy(zrc, dzone, szone);

	dmz_lock_flush(zmd);

	if (ret == 0) {
		/* Validate copied blocks */
		ret = dmz_copy_valid_blocks(zmd, dzone, szone);
	}
	if (ret) {
		/* Free the sequential zone */
		dmz_lock_map(zmd);
		dmz_free_zone(zmd, szone);
		dmz_unlock_map(zmd);
	} else {
		/* Free the data zone and remap the chunk */
		dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, szone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Reclaim an empty zone.
 */
static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dmz_metadata *zmd = zrc->metadata;

	dmz_lock_flush(zmd);
	dmz_lock_map(zmd);
	dmz_unmap_zone(zmd, dzone);
	dmz_unlock_zone_reclaim(dzone);
	dmz_free_zone(zmd, dzone);
	dmz_unlock_map(zmd);
	dmz_unlock_flush(zmd);
}

/*
 * Find a candidate zone for reclaim and process it.
 */
static void dmz_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dm_zone *dzone;
	struct dm_zone *rzone;
	unsigned long start;
	int ret;

	/* Get a data zone */
	dzone = dmz_get_zone_for_reclaim(zmd);
	if (!dzone)
		return;

	start = jiffies;

	if (dmz_is_rnd(dzone)) {
		if (!dmz_weight(dzone)) {
			/* Empty zone */
			dmz_reclaim_empty(zrc, dzone);
			ret = 0;
		} else {
			/*
			 * Reclaim the random data zone by moving its
			 * valid data blocks to a free sequential zone.
			 */
			ret = dmz_reclaim_rnd_data(zrc, dzone);
		}
		rzone = dzone;

	} else {
		struct dm_zone *bzone = dzone->bzone;
		sector_t chunk_block = 0;

		ret = dmz_first_valid_block(zmd, bzone, &chunk_block);
		if (ret < 0)
			goto out;

		if (ret == 0 || chunk_block >= dzone->wp_block) {
			/*
			 * The buffer zone is empty or its valid blocks are
			 * after the data zone write pointer.
			 */
			ret = dmz_reclaim_buf(zrc, dzone);
			rzone = bzone;
		} else {
			/*
			 * Reclaim the data zone by merging it into the
			 * buffer zone so that the buffer zone itself can
			 * be later reclaimed.
			 */
			ret = dmz_reclaim_seq_data(zrc, dzone);
			rzone = dzone;
		}
	}
out:
	if (ret) {
		dmz_unlock_zone_reclaim(dzone);
		return;
	}

	(void) dmz_flush_metadata(zrc->metadata);

	dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
		      dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
}
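
/*
 * To summarize the choices above: an empty random zone is simply
 * unmapped and freed; a random zone holding data is moved to a free
 * sequential zone; a buffered sequential zone either absorbs its
 * buffer zone (when the buffer's valid blocks all sit at or after the
 * data zone write pointer) or is merged into the buffer zone, which
 * then takes over as the chunk's data zone.
 */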

/*
 * Test if the target device is idle.
 */
static inline int dmz_target_idle(struct dmz_reclaim *zrc)
{
	return time_is_before_jiffies(zrc->atime + DMZ_IDLE_PERIOD);
}
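
/*
 * time_is_before_jiffies(zrc->atime + DMZ_IDLE_PERIOD) is true once
 * the deadline atime + DMZ_IDLE_PERIOD is in the past, that is, once
 * no BIO has been accounted (see dmz_reclaim_bio_acc()) for at least
 * DMZ_IDLE_PERIOD (10 seconds).
 */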

/*
 * Test if reclaim is necessary.
 */
static bool dmz_should_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_rnd = dmz_nr_rnd_zones(zmd);
	unsigned int nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
	unsigned int p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;

	/* Reclaim when idle */
	if (dmz_target_idle(zrc) && nr_unmap_rnd < nr_rnd)
		return true;

	/* If there are still plenty of random zones, do not reclaim */
	if (p_unmap_rnd >= DMZ_RECLAIM_HIGH_UNMAP_RND)
		return false;

	/*
	 * If the percentage of unmapped random zones is low,
	 * reclaim even if the target is busy.
	 */
	return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND;
}
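
/*
 * Note that p_unmap_rnd uses integer division: 19 unmapped zones out
 * of 64, for instance, give 19 * 100 / 64 = 29, which still triggers
 * reclaim on a busy target. The nr_unmap_rnd < nr_rnd check in the
 * idle case skips reclaim when no random zone is mapped, i.e. when
 * there is nothing left to reclaim.
 */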

/*
 * Reclaim work function.
 */
static void dmz_reclaim_work(struct work_struct *work)
{
	struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_rnd, nr_unmap_rnd;
	unsigned int p_unmap_rnd;

	if (!dmz_should_reclaim(zrc)) {
		mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
		return;
	}

	/*
	 * We need to start reclaiming random zones: set up zone copy
	 * throttling to go fast if we are very low on free random zones,
	 * and slower if some free random zones remain, to limit the
	 * impact on the user workload as much as possible.
	 */
	nr_rnd = dmz_nr_rnd_zones(zmd);
	nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
	p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;
	if (dmz_target_idle(zrc) || p_unmap_rnd < DMZ_RECLAIM_LOW_UNMAP_RND / 2) {
		/* Idle or very low percentage: go fast */
		zrc->kc_throttle.throttle = 100;
	} else {
		/* Busy but we still have some random zones: throttle */
		zrc->kc_throttle.throttle = min(75U, 100U - p_unmap_rnd / 2);
	}
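
	/*
	 * For example, a busy target with p_unmap_rnd = 20 gets
	 * min(75, 100 - 20 / 2) = min(75, 90) = 75, so the copy is
	 * throttled, while an idle target runs unthrottled (100).
	 */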

	dmz_dev_debug(zrc->dev,
		      "Reclaim (%u): %s, %u%% free rnd zones (%u/%u)",
		      zrc->kc_throttle.throttle,
		      (dmz_target_idle(zrc) ? "Idle" : "Busy"),
		      p_unmap_rnd, nr_unmap_rnd, nr_rnd);

	dmz_reclaim(zrc);

	dmz_schedule_reclaim(zrc);
}

/*
 * Initialize reclaim.
 */
int dmz_ctr_reclaim(struct dmz_dev *dev, struct dmz_metadata *zmd,
		    struct dmz_reclaim **reclaim)
{
	struct dmz_reclaim *zrc;
	int ret;

	zrc = kzalloc(sizeof(struct dmz_reclaim), GFP_KERNEL);
	if (!zrc)
		return -ENOMEM;

	zrc->dev = dev;
	zrc->metadata = zmd;
	zrc->atime = jiffies;

	/* Reclaim kcopyd client */
	zrc->kc = dm_kcopyd_client_create(&zrc->kc_throttle);
	if (IS_ERR(zrc->kc)) {
		ret = PTR_ERR(zrc->kc);
		zrc->kc = NULL;
		goto err;
	}

	/* Reclaim work */
	INIT_DELAYED_WORK(&zrc->work, dmz_reclaim_work);
	zrc->wq = alloc_ordered_workqueue("dmz_rwq_%s", WQ_MEM_RECLAIM,
					  dev->name);
	if (!zrc->wq) {
		ret = -ENOMEM;
		goto err;
	}

	*reclaim = zrc;
	queue_delayed_work(zrc->wq, &zrc->work, 0);

	return 0;
err:
	if (zrc->kc)
		dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);

	return ret;
}

/*
 * Terminate reclaim.
 */
void dmz_dtr_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
	destroy_workqueue(zrc->wq);
	dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);
}

/*
 * Suspend reclaim.
 */
void dmz_suspend_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
}

/*
 * Resume reclaim.
 */
void dmz_resume_reclaim(struct dmz_reclaim *zrc)
{
	queue_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
}

/*
 * BIO accounting.
 */
void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc)
{
	zrc->atime = jiffies;
}

/*
 * Start reclaim if necessary.
 */
void dmz_schedule_reclaim(struct dmz_reclaim *zrc)
{
	if (dmz_should_reclaim(zrc))
		mod_delayed_work(zrc->wq, &zrc->work, 0);
}