xref: /openbmc/linux/drivers/md/dm-zoned-reclaim.c (revision 82df5b73)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>

#define	DM_MSG_PREFIX		"zoned reclaim"

struct dmz_reclaim {
	struct dmz_metadata     *metadata;

	struct delayed_work	work;
	struct workqueue_struct *wq;

	struct dm_kcopyd_client	*kc;
	struct dm_kcopyd_throttle kc_throttle;
	int			kc_err;

	int			dev_idx;

	unsigned long		flags;

	/* Last target access time */
	unsigned long		atime;
};

/*
 * Reclaim state flags.
 */
enum {
	DMZ_RECLAIM_KCOPY,
};

/*
 * Period of target BIO inactivity (in jiffies) after which the target is
 * considered idle.
 */
#define DMZ_IDLE_PERIOD			(10UL * HZ)

/*
 * Percentage of unmapped (free) random zones below which reclaim starts
 * even if the target is busy.
 */
#define DMZ_RECLAIM_LOW_UNMAP_ZONES	30

/*
 * Percentage of unmapped (free) random zones above which reclaim will
 * stop if the target is busy.
 */
#define DMZ_RECLAIM_HIGH_UNMAP_ZONES	50
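
/*
 * Example: with 200 cache (or random) zones, a busy target is forced to
 * reclaim once no more than 60 zones (30%) are unmapped, while an idle
 * target reclaims whenever reclaimable zones exist. See
 * dmz_should_reclaim() below for how these watermarks are applied.
 */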

/*
 * Align a sequential zone's write pointer to the specified block.
 */
static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
				sector_t block)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dmz_dev *dev = zone->dev;
	sector_t wp_block = zone->wp_block;
	unsigned int nr_blocks;
	int ret;

	if (wp_block == block)
		return 0;

	if (wp_block > block)
		return -EIO;

	/*
	 * Zero out the space between the write pointer
	 * and the requested position.
	 */
	nr_blocks = block - wp_block;
	ret = blkdev_issue_zeroout(dev->bdev,
				   dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
				   dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
	if (ret) {
		dmz_dev_err(dev,
			    "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
			    zone->id, (unsigned long long)wp_block,
			    (unsigned long long)block, nr_blocks, ret);
		dmz_check_bdev(dev);
		return ret;
	}

	zone->wp_block = block;

	return 0;
}
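
/*
 * For example, if the zone's write pointer is at block 1000 and the next
 * block to write is 1024, the 24-block gap is filled with zeroes so that
 * the subsequent write still lands exactly at the write pointer.
 */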

/*
 * dm_kcopyd_copy end notification.
 */
static void dmz_reclaim_kcopy_end(int read_err, unsigned long write_err,
				  void *context)
{
	struct dmz_reclaim *zrc = context;

	if (read_err || write_err)
		zrc->kc_err = -EIO;
	else
		zrc->kc_err = 0;

	clear_bit_unlock(DMZ_RECLAIM_KCOPY, &zrc->flags);
	smp_mb__after_atomic();
	wake_up_bit(&zrc->flags, DMZ_RECLAIM_KCOPY);
}
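
/*
 * dmz_reclaim_copy() sets DMZ_RECLAIM_KCOPY before each dm_kcopyd_copy()
 * and sleeps in wait_on_bit_io() until this callback runs: the kc_err
 * store is ordered before the unlocking bit clear, and the barrier makes
 * the clear visible before wake_up_bit() checks for waiters, so the
 * waiter always observes the error code on wakeup.
 */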

/*
 * Copy valid blocks of src_zone into dst_zone.
 */
static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
			    struct dm_zone *src_zone, struct dm_zone *dst_zone)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dm_io_region src, dst;
	sector_t block = 0, end_block;
	sector_t nr_blocks;
	sector_t src_zone_block;
	sector_t dst_zone_block;
	unsigned long flags = 0;
	int ret;

	if (dmz_is_seq(src_zone))
		end_block = src_zone->wp_block;
	else
		end_block = dmz_zone_nr_blocks(zmd);
	src_zone_block = dmz_start_block(zmd, src_zone);
	dst_zone_block = dmz_start_block(zmd, dst_zone);

	if (dmz_is_seq(dst_zone))
		set_bit(DM_KCOPYD_WRITE_SEQ, &flags);

	while (block < end_block) {
		if (src_zone->dev->flags & DMZ_BDEV_DYING)
			return -EIO;
		if (dst_zone->dev->flags & DMZ_BDEV_DYING)
			return -EIO;

		if (dmz_reclaim_should_terminate(src_zone))
			return -EINTR;

		/* Get a valid region from the source zone */
		ret = dmz_first_valid_block(zmd, src_zone, &block);
		if (ret <= 0)
			return ret;
		nr_blocks = ret;

		/*
		 * If we are writing to a sequential zone, writes must be
		 * issued in order, so zero out any hole between the zone's
		 * write pointer and the block we are about to copy.
		 */
		if (dmz_is_seq(dst_zone)) {
			ret = dmz_reclaim_align_wp(zrc, dst_zone, block);
			if (ret)
				return ret;
		}

		src.bdev = src_zone->dev->bdev;
		src.sector = dmz_blk2sect(src_zone_block + block);
		src.count = dmz_blk2sect(nr_blocks);

		dst.bdev = dst_zone->dev->bdev;
		dst.sector = dmz_blk2sect(dst_zone_block + block);
		dst.count = src.count;

		/* Copy the valid region */
		set_bit(DMZ_RECLAIM_KCOPY, &zrc->flags);
		dm_kcopyd_copy(zrc->kc, &src, 1, &dst, flags,
			       dmz_reclaim_kcopy_end, zrc);

		/* Wait for copy to complete */
		wait_on_bit_io(&zrc->flags, DMZ_RECLAIM_KCOPY,
			       TASK_UNINTERRUPTIBLE);
		if (zrc->kc_err)
			return zrc->kc_err;

		block += nr_blocks;
		if (dmz_is_seq(dst_zone))
			dst_zone->wp_block = block;
	}

	return 0;
}
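
/*
 * For example, with valid blocks at 0..63 and 128..191 in the source and
 * a sequential destination, the loop issues a 64-block copy, zeroes out
 * blocks 64..127 to keep the write pointer aligned, then issues and
 * waits on the second 64-block copy before returning.
 */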

/*
 * Move the valid blocks of dzone's buffer zone into dzone (after its
 * write pointer) and free the buffer zone.
 */
static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dm_zone *bzone = dzone->bzone;
	sector_t chunk_block = dzone->wp_block;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	DMDEBUG("(%s/%u): Chunk %u, move buf zone %u (weight %u) to data zone %u (weight %u)",
		dmz_metadata_label(zmd), zrc->dev_idx,
		dzone->chunk, bzone->id, dmz_weight(bzone),
		dzone->id, dmz_weight(dzone));

	/* Flush the buffer zone into the data zone */
	ret = dmz_reclaim_copy(zrc, bzone, dzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, bzone, dzone, chunk_block);
	if (ret == 0) {
		/* Free the buffer zone */
		dmz_invalidate_blocks(zmd, bzone, 0, dmz_zone_nr_blocks(zmd));
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, bzone);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Merge valid blocks of dzone into its buffer zone and free dzone.
 */
static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *bzone = dzone->bzone;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret = 0;

	DMDEBUG("(%s/%u): Chunk %u, move data zone %u (weight %u) to buf zone %u (weight %u)",
		dmz_metadata_label(zmd), zrc->dev_idx,
		chunk, dzone->id, dmz_weight(dzone),
		bzone->id, dmz_weight(bzone));

	/* Flush data zone into the buffer zone */
	ret = dmz_reclaim_copy(zrc, dzone, bzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, dzone, bzone, 0);
	if (ret == 0) {
		/*
		 * Free the data zone and remap the chunk to
		 * the buffer zone.
		 */
		dmz_invalidate_blocks(zmd, dzone, 0, dmz_zone_nr_blocks(zmd));
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, bzone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Move valid blocks of the cache or random data zone dzone into a free
 * sequential zone (falling back to a random zone when needed). Once the
 * blocks are moved, remap the chunk to the new zone.
 */
static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *szone = NULL;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;
	int alloc_flags = DMZ_ALLOC_SEQ;

	/* Get a free random or sequential zone */
	dmz_lock_map(zmd);
again:
	szone = dmz_alloc_zone(zmd, zrc->dev_idx,
			       alloc_flags | DMZ_ALLOC_RECLAIM);
	if (!szone && alloc_flags == DMZ_ALLOC_SEQ && dmz_nr_cache_zones(zmd)) {
		alloc_flags = DMZ_ALLOC_RND;
		goto again;
	}
	dmz_unlock_map(zmd);
	if (!szone)
		return -ENOSPC;

	DMDEBUG("(%s/%u): Chunk %u, move %s zone %u (weight %u) to %s zone %u",
		dmz_metadata_label(zmd), zrc->dev_idx, chunk,
		dmz_is_cache(dzone) ? "cache" : "rnd",
		dzone->id, dmz_weight(dzone),
		dmz_is_rnd(szone) ? "rnd" : "seq", szone->id);

	/* Flush the data zone into the newly allocated zone */
	ret = dmz_reclaim_copy(zrc, dzone, szone);

	dmz_lock_flush(zmd);

	if (ret == 0) {
		/* Validate copied blocks */
		ret = dmz_copy_valid_blocks(zmd, dzone, szone);
	}
	if (ret) {
		/* Free the sequential zone */
		dmz_lock_map(zmd);
		dmz_free_zone(zmd, szone);
		dmz_unlock_map(zmd);
	} else {
		/* Free the data zone and remap the chunk */
		dmz_invalidate_blocks(zmd, dzone, 0, dmz_zone_nr_blocks(zmd));
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, szone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}
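
/*
 * dmz_reclaim_buf(), dmz_reclaim_seq_data() and dmz_reclaim_rnd_data()
 * implement the three reclaim strategies: flush a buffer zone into its
 * data zone, merge a data zone into its buffer zone, and move a cache or
 * random data zone into a newly allocated zone. In all three cases the
 * chunk mapping is changed under the map lock only after the copied
 * blocks have been validated in the metadata.
 */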

/*
 * Reclaim an empty zone.
 */
static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dmz_metadata *zmd = zrc->metadata;

	dmz_lock_flush(zmd);
	dmz_lock_map(zmd);
	dmz_unmap_zone(zmd, dzone);
	dmz_unlock_zone_reclaim(dzone);
	dmz_free_zone(zmd, dzone);
	dmz_unlock_map(zmd);
	dmz_unlock_flush(zmd);
}

/*
 * Test if the target device is idle.
 */
static inline int dmz_target_idle(struct dmz_reclaim *zrc)
{
	return time_is_before_jiffies(zrc->atime + DMZ_IDLE_PERIOD);
}
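
/*
 * This is equivalent to time_after(jiffies, zrc->atime + DMZ_IDLE_PERIOD):
 * the target is idle when no BIO has refreshed atime for at least
 * DMZ_IDLE_PERIOD (10 seconds).
 */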

/*
 * Find a candidate zone for reclaim and process it.
 */
static int dmz_do_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dm_zone *dzone;
	struct dm_zone *rzone;
	unsigned long start;
	int ret;

	/* Get a data zone */
	dzone = dmz_get_zone_for_reclaim(zmd, zrc->dev_idx,
					 dmz_target_idle(zrc));
	if (!dzone) {
		DMDEBUG("(%s/%u): No zone found to reclaim",
			dmz_metadata_label(zmd), zrc->dev_idx);
		return -EBUSY;
	}

	start = jiffies;
	if (dmz_is_cache(dzone) || dmz_is_rnd(dzone)) {
		if (!dmz_weight(dzone)) {
			/* Empty zone */
			dmz_reclaim_empty(zrc, dzone);
			ret = 0;
		} else {
			/*
			 * Reclaim the cache or random data zone by moving
			 * its valid blocks to a free sequential zone.
			 */
			ret = dmz_reclaim_rnd_data(zrc, dzone);
		}
		rzone = dzone;

	} else {
		struct dm_zone *bzone = dzone->bzone;
		sector_t chunk_block = 0;

		/* Report against the data zone if we fail early */
		rzone = dzone;

		ret = dmz_first_valid_block(zmd, bzone, &chunk_block);
		if (ret < 0)
			goto out;

		if (ret == 0 || chunk_block >= dzone->wp_block) {
			/*
			 * The buffer zone is empty or its valid blocks are
			 * after the data zone write pointer.
			 */
			ret = dmz_reclaim_buf(zrc, dzone);
			rzone = bzone;
		} else {
			/*
			 * Reclaim the data zone by merging it into the
			 * buffer zone so that the buffer zone itself can
			 * be later reclaimed.
			 */
			ret = dmz_reclaim_seq_data(zrc, dzone);
			rzone = dzone;
		}
	}
out:
	if (ret) {
		if (ret == -EINTR)
			DMDEBUG("(%s/%u): reclaim zone %u interrupted",
				dmz_metadata_label(zmd), zrc->dev_idx,
				rzone->id);
		else
			DMDEBUG("(%s/%u): Failed to reclaim zone %u, err %d",
				dmz_metadata_label(zmd), zrc->dev_idx,
				rzone->id, ret);
		dmz_unlock_zone_reclaim(dzone);
		return ret;
	}

	ret = dmz_flush_metadata(zrc->metadata);
	if (ret) {
		DMDEBUG("(%s/%u): Metadata flush for zone %u failed, err %d",
			dmz_metadata_label(zmd), zrc->dev_idx, rzone->id, ret);
		return ret;
	}

	DMDEBUG("(%s/%u): Reclaimed zone %u in %u ms",
		dmz_metadata_label(zmd), zrc->dev_idx,
		rzone->id, jiffies_to_msecs(jiffies - start));
	return 0;
}

/*
 * Percentage of unmapped (free) zones in the reclaim pool: cache zones
 * if the setup has any, otherwise the random zones of this device.
 */
static unsigned int dmz_reclaim_percentage(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_cache = dmz_nr_cache_zones(zmd);
	unsigned int nr_unmap, nr_zones;

	if (nr_cache) {
		nr_zones = nr_cache;
		nr_unmap = dmz_nr_unmap_cache_zones(zmd);
	} else {
		nr_zones = dmz_nr_rnd_zones(zmd, zrc->dev_idx);
		nr_unmap = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
	}
	return nr_unmap * 100 / nr_zones;
}
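
/*
 * For example, with no cache zones, 128 random zones on this device and
 * 32 of them unmapped, this returns 32 * 100 / 128 = 25.
 */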

/*
 * Test if reclaim is necessary.
 */
static bool dmz_should_reclaim(struct dmz_reclaim *zrc, unsigned int p_unmap)
{
	unsigned int nr_reclaim;

	nr_reclaim = dmz_nr_rnd_zones(zrc->metadata, zrc->dev_idx);

	if (dmz_nr_cache_zones(zrc->metadata)) {
		/*
		 * The first device in a multi-device
		 * setup only contains cache zones, so
		 * never start reclaim there.
		 */
		if (zrc->dev_idx == 0)
			return false;
		nr_reclaim += dmz_nr_cache_zones(zrc->metadata);
	}

	/* Reclaim when idle */
	if (dmz_target_idle(zrc) && nr_reclaim)
		return true;

	/* If there are still plenty of unmapped zones, do not reclaim */
	if (p_unmap >= DMZ_RECLAIM_HIGH_UNMAP_ZONES)
		return false;

	/*
	 * If the percentage of unmapped zones is low, reclaim
	 * even if the target is busy.
	 */
	return p_unmap <= DMZ_RECLAIM_LOW_UNMAP_ZONES;
}
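
/*
 * Sample decisions: an idle target with reclaimable zones always
 * reclaims; a busy target reclaims at p_unmap = 25 (at or below the low
 * watermark) but not at p_unmap = 40 or 60, where reclaim is deferred
 * until the target goes idle.
 */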

/*
 * Reclaim work function.
 */
static void dmz_reclaim_work(struct work_struct *work)
{
	struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int p_unmap, nr_unmap_rnd = 0, nr_rnd = 0;
	int ret;

	if (dmz_dev_is_dying(zmd))
		return;

	p_unmap = dmz_reclaim_percentage(zrc);
	if (!dmz_should_reclaim(zrc, p_unmap)) {
		mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
		return;
	}

	/*
	 * We need to start reclaiming random zones: set up zone copy
	 * throttling so that reclaim runs at full speed when free zones
	 * are very low and is throttled otherwise, to limit the impact
	 * on the user I/O workload as much as possible.
	 */
	if (dmz_target_idle(zrc) || p_unmap < DMZ_RECLAIM_LOW_UNMAP_ZONES / 2) {
		/* Idle or very low percentage: go fast */
		zrc->kc_throttle.throttle = 100;
	} else {
		/* Busy but we still have some free zones: throttle */
		zrc->kc_throttle.throttle = min(75U, 100U - p_unmap / 2);
	}
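
	/*
	 * Note: reclaim runs on a busy target only when p_unmap is at most
	 * DMZ_RECLAIM_LOW_UNMAP_ZONES, so 100 - p_unmap / 2 is at least 85
	 * here and the min() always yields the 75% cap with the current
	 * watermarks; the formula only matters if they are ever raised.
	 */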

	nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
	nr_rnd = dmz_nr_rnd_zones(zmd, zrc->dev_idx);

	DMDEBUG("(%s/%u): Reclaim (%u): %s, %u%% free zones (%u/%u cache %u/%u random)",
		dmz_metadata_label(zmd), zrc->dev_idx,
		zrc->kc_throttle.throttle,
		(dmz_target_idle(zrc) ? "Idle" : "Busy"),
		p_unmap, dmz_nr_unmap_cache_zones(zmd),
		dmz_nr_cache_zones(zmd),
		nr_unmap_rnd, nr_rnd);

	ret = dmz_do_reclaim(zrc);
	if (ret && ret != -EINTR) {
		if (!dmz_check_dev(zmd))
			return;
	}

	dmz_schedule_reclaim(zrc);
}

/*
 * Initialize reclaim.
 */
int dmz_ctr_reclaim(struct dmz_metadata *zmd,
		    struct dmz_reclaim **reclaim, int idx)
{
	struct dmz_reclaim *zrc;
	int ret;

	zrc = kzalloc(sizeof(struct dmz_reclaim), GFP_KERNEL);
	if (!zrc)
		return -ENOMEM;

	zrc->metadata = zmd;
	zrc->atime = jiffies;
	zrc->dev_idx = idx;

	/* Reclaim kcopyd client */
	zrc->kc = dm_kcopyd_client_create(&zrc->kc_throttle);
	if (IS_ERR(zrc->kc)) {
		ret = PTR_ERR(zrc->kc);
		zrc->kc = NULL;
		goto err;
	}

	/* Reclaim work */
	INIT_DELAYED_WORK(&zrc->work, dmz_reclaim_work);
	zrc->wq = alloc_ordered_workqueue("dmz_rwq_%s_%d", WQ_MEM_RECLAIM,
					  dmz_metadata_label(zmd), idx);
	if (!zrc->wq) {
		ret = -ENOMEM;
		goto err;
	}

	*reclaim = zrc;
	queue_delayed_work(zrc->wq, &zrc->work, 0);

	return 0;
err:
	if (zrc->kc)
		dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);

	return ret;
}

/*
 * Terminate reclaim.
 */
void dmz_dtr_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
	destroy_workqueue(zrc->wq);
	dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);
}

/*
 * Suspend reclaim.
 */
void dmz_suspend_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
}

/*
 * Resume reclaim.
 */
void dmz_resume_reclaim(struct dmz_reclaim *zrc)
{
	queue_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
}

/*
 * BIO accounting.
 */
void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc)
{
	zrc->atime = jiffies;
}

/*
 * Start reclaim if necessary.
 */
void dmz_schedule_reclaim(struct dmz_reclaim *zrc)
{
	unsigned int p_unmap = dmz_reclaim_percentage(zrc);

	if (dmz_should_reclaim(zrc, p_unmap))
		mod_delayed_work(zrc->wq, &zrc->work, 0);
}
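
/*
 * Typical lifecycle, as driven by the dm-zoned target: dmz_ctr_reclaim()
 * creates one reclaim instance per zoned device at target construction,
 * dmz_reclaim_bio_acc() refreshes the idle timestamp on every BIO,
 * dmz_schedule_reclaim() kicks the worker when free zones run low, the
 * suspend/resume hooks bracket target suspend, and dmz_dtr_reclaim()
 * tears everything down at target destruction.
 */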