// SPDX-License-Identifier: GPL-2.0-or-later
/*
   linear.c : Multiple Devices driver for Linux
	      Copyright (C) 1994-96 Marc ZYNGIER
	      <zyngier@ufr-info-p7.ibp.fr> or
	      <maz@gloups.fdn.fr>

   Linear mode management functions.

*/

#include <linux/blkdev.h>
#include <linux/raid/md_u.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
#include "md-linear.h"

/*
 * find which device holds a particular offset
 */
static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
{
	int lo, mid, hi;
	struct linear_conf *conf;

	lo = 0;
	hi = mddev->raid_disks - 1;
	conf = mddev->private;

	/*
	 * Binary search for the first device whose end_sector is
	 * strictly greater than the requested sector; that device
	 * holds the sector.
	 */
	while (hi > lo) {
		mid = (hi + lo) / 2;
		if (sector < conf->disks[mid].end_sector)
			hi = mid;
		else
			lo = mid + 1;
	}

	return conf->disks + lo;
}
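
/*
 * Worked example (hypothetical sizes): with two member devices of
 * 100 and 200 sectors, conf->disks[].end_sector is {100, 300}.
 * A lookup for sector 150 fails the test against disks[0]
 * (150 >= 100) and succeeds against disks[1] (150 < 300), so
 * which_dev() returns disks[1].
 */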

/*
 * In linear_congested() conf->raid_disks is used as a copy of
 * mddev->raid_disks to iterate conf->disks[]. Because conf->raid_disks
 * and conf->disks[] are created together in linear_conf(), they are
 * always consistent with each other, while mddev->raid_disks may not be.
 */
static int linear_congested(struct mddev *mddev, int bits)
{
	struct linear_conf *conf;
	int i, ret = 0;

	rcu_read_lock();
	conf = rcu_dereference(mddev->private);

	for (i = 0; i < conf->raid_disks && !ret; i++) {
		struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
		ret |= bdi_congested(q->backing_dev_info, bits);
	}

	rcu_read_unlock();
	return ret;
}

static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	struct linear_conf *conf;
	sector_t array_sectors;

	conf = mddev->private;
	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);
	array_sectors = conf->array_sectors;

	return array_sectors;
}

static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
{
	struct linear_conf *conf;
	struct md_rdev *rdev;
	int i, cnt;
	bool discard_supported = false;

	conf = kzalloc(struct_size(conf, disks, raid_disks), GFP_KERNEL);
	if (!conf)
		return NULL;

	cnt = 0;
	conf->array_sectors = 0;

	rdev_for_each(rdev, mddev) {
		int j = rdev->raid_disk;
		struct dev_info *disk = conf->disks + j;
		sector_t sectors;

		if (j < 0 || j >= raid_disks || disk->rdev) {
			pr_warn("md/linear:%s: disk numbering problem. Aborting!\n",
				mdname(mddev));
			goto out;
		}

		disk->rdev = rdev;
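		/*
		 * If a chunk size ("rounding") was configured, round the
		 * usable size of each member down to a whole multiple of
		 * chunk_sectors.
		 */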
		if (mddev->chunk_sectors) {
			sectors = rdev->sectors;
			sector_div(sectors, mddev->chunk_sectors);
			rdev->sectors = sectors * mddev->chunk_sectors;
		}

		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);

		conf->array_sectors += rdev->sectors;
		cnt++;

		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
			discard_supported = true;
	}
	if (cnt != raid_disks) {
		pr_warn("md/linear:%s: not enough drives present. Aborting!\n",
			mdname(mddev));
		goto out;
	}

	if (!discard_supported)
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
	else
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);

	/*
	 * Compute the cumulative device offsets: each end_sector holds
	 * the first array sector that lies past that member device.
	 */
	conf->disks[0].end_sector = conf->disks[0].rdev->sectors;

	for (i = 1; i < raid_disks; i++)
		conf->disks[i].end_sector =
			conf->disks[i-1].end_sector +
			conf->disks[i].rdev->sectors;
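
	/*
	 * Worked example (hypothetical sizes): members of 100 and 200
	 * sectors give end_sector = {100, 300}, so disks[i] covers the
	 * array sectors [disks[i-1].end_sector, disks[i].end_sector).
	 */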

	/*
	 * conf->raid_disks is a copy of mddev->raid_disks. The reason to
	 * keep a copy of mddev->raid_disks in struct linear_conf is that
	 * mddev->raid_disks may not be consistent with the number of
	 * pointers in conf->disks[] when it is updated in linear_add() and
	 * used to iterate the old conf->disks[] array in linear_congested().
	 * Because conf->raid_disks here is always consistent with the
	 * number of pointers in the conf->disks[] array, and mddev->private
	 * is updated with rcu_assign_pointer() in linear_add(), such a race
	 * can be avoided.
	 */
	conf->raid_disks = raid_disks;

	return conf;

out:
	kfree(conf);
	return NULL;
}

static int linear_run(struct mddev *mddev)
{
	struct linear_conf *conf;
	int ret;

	if (md_check_no_bitmap(mddev))
		return -EINVAL;
	conf = linear_conf(mddev, mddev->raid_disks);

	if (!conf)
		return 1;
	mddev->private = conf;
	md_set_array_sectors(mddev, linear_size(mddev, 0, 0));

	ret = md_integrity_register(mddev);
	if (ret) {
		kfree(conf);
		mddev->private = NULL;
	}
	return ret;
}

static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
{
	/* Adding a drive to a linear array allows the array to grow.
	 * It is permitted if the new drive has a matching superblock
	 * already on it, with raid_disk equal to raid_disks.
	 * It is achieved by creating a new linear_conf structure
	 * and swapping it in place of the current one.
	 * The current one is never freed until the array is stopped.
	 * This avoids races.
	 */
	struct linear_conf *newconf, *oldconf;

	if (rdev->saved_raid_disk != mddev->raid_disks)
		return -EINVAL;

	rdev->raid_disk = rdev->saved_raid_disk;
	rdev->saved_raid_disk = -1;

	newconf = linear_conf(mddev, mddev->raid_disks + 1);

	if (!newconf)
		return -ENOMEM;

	/* newconf->raid_disks already keeps a copy of the increased
	 * value of mddev->raid_disks; WARN_ONCE() is just used to make
	 * sure of this. It is possible that oldconf is still referenced
	 * in linear_congested(), therefore kfree_rcu() is used so that
	 * oldconf is not freed until no one uses it anymore.
	 */
	mddev_suspend(mddev);
	oldconf = rcu_dereference_protected(mddev->private,
			lockdep_is_held(&mddev->reconfig_mutex));
	mddev->raid_disks++;
	WARN_ONCE(mddev->raid_disks != newconf->raid_disks,
		"copied raid_disks doesn't match mddev->raid_disks");
	rcu_assign_pointer(mddev->private, newconf);
	md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
	set_capacity(mddev->gendisk, mddev->array_sectors);
	mddev_resume(mddev);
	revalidate_disk(mddev->gendisk);
	kfree_rcu(oldconf, rcu);
	return 0;
}

static void linear_free(struct mddev *mddev, void *priv)
{
	struct linear_conf *conf = priv;

	kfree(conf);
}

static bool linear_make_request(struct mddev *mddev, struct bio *bio)
{
	char b[BDEVNAME_SIZE];
	struct dev_info *tmp_dev;
	sector_t start_sector, end_sector, data_offset;
	sector_t bio_sector = bio->bi_iter.bi_sector;

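	/*
	 * Flush requests are not remapped here; md_flush_request()
	 * takes care of sending them on to the member devices.
	 */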
	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		md_flush_request(mddev, bio);
		return true;
	}

	tmp_dev = which_dev(mddev, bio_sector);
	start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
	end_sector = tmp_dev->end_sector;
	data_offset = tmp_dev->rdev->data_offset;

	if (unlikely(bio_sector >= end_sector ||
		     bio_sector < start_sector))
		goto out_of_bounds;

	if (unlikely(bio_end_sector(bio) > end_sector)) {
		/* This bio crosses a device boundary, so we have to split it */
		struct bio *split = bio_split(bio, end_sector - bio_sector,
					      GFP_NOIO, &mddev->bio_set);
		bio_chain(split, bio);
		generic_make_request(bio);
		bio = split;
	}
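
	/*
	 * Remap the array sector onto the member device: subtract the
	 * start of this device's range, then add the rdev's data offset.
	 */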

	bio_set_dev(bio, tmp_dev->rdev->bdev);
	bio->bi_iter.bi_sector = bio->bi_iter.bi_sector -
		start_sector + data_offset;

	if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
		     !blk_queue_discard(bio->bi_disk->queue))) {
		/* The target device does not support discard; just ignore it */
		bio_endio(bio);
	} else {
		if (mddev->gendisk)
			trace_block_bio_remap(bio->bi_disk->queue,
					      bio, disk_devt(mddev->gendisk),
					      bio_sector);
		mddev_check_writesame(mddev, bio);
		mddev_check_write_zeroes(mddev, bio);
		generic_make_request(bio);
	}
	return true;

out_of_bounds:
	pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %s: %llu sectors, offset %llu\n",
	       mdname(mddev),
	       (unsigned long long)bio->bi_iter.bi_sector,
	       bdevname(tmp_dev->rdev->bdev, b),
	       (unsigned long long)tmp_dev->rdev->sectors,
	       (unsigned long long)start_sector);
	bio_io_error(bio);
	return true;
}

static void linear_status(struct seq_file *seq, struct mddev *mddev)
{
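	/* chunk_sectors is in 512-byte sectors; dividing by 2 yields KiB */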
	seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
}

static void linear_quiesce(struct mddev *mddev, int state)
{
}

static struct md_personality linear_personality =
{
	.name		= "linear",
	.level		= LEVEL_LINEAR,
	.owner		= THIS_MODULE,
	.make_request	= linear_make_request,
	.run		= linear_run,
	.free		= linear_free,
	.status		= linear_status,
	.hot_add_disk	= linear_add,
	.size		= linear_size,
	.quiesce	= linear_quiesce,
	.congested	= linear_congested,
};

static int __init linear_init(void)
{
	return register_md_personality(&linear_personality);
}

static void linear_exit(void)
{
	unregister_md_personality(&linear_personality);
}

module_init(linear_init);
module_exit(linear_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Linear device concatenation personality for MD");
MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated */
MODULE_ALIAS("md-linear");
MODULE_ALIAS("md-level--1");