// SPDX-License-Identifier: GPL-2.0
/*
 *  Block device concurrent positioning ranges.
 *
 *  Copyright (C) 2021 Western Digital Corporation or its Affiliates.
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/init.h>

#include "blk.h"

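/*
 * sysfs "sector" and "nr_sectors" attribute show operations for an
 * independent access range: report the range start sector and number of
 * sectors, both in 512B sector units.
 */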
static ssize_t
blk_ia_range_sector_show(struct blk_independent_access_range *iar,
			 char *buf)
{
	return sprintf(buf, "%llu\n", iar->sector);
}

static ssize_t
blk_ia_range_nr_sectors_show(struct blk_independent_access_range *iar,
			     char *buf)
{
	return sprintf(buf, "%llu\n", iar->nr_sectors);
}

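/*
 * Independent access range sysfs attribute entry, associating an attribute
 * with its show method.
 */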
struct blk_ia_range_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_independent_access_range *iar, char *buf);
};

static struct blk_ia_range_sysfs_entry blk_ia_range_sector_entry = {
	.attr = { .name = "sector", .mode = 0444 },
	.show = blk_ia_range_sector_show,
};

static struct blk_ia_range_sysfs_entry blk_ia_range_nr_sectors_entry = {
	.attr = { .name = "nr_sectors", .mode = 0444 },
	.show = blk_ia_range_nr_sectors_show,
};

static struct attribute *blk_ia_range_attrs[] = {
	&blk_ia_range_sector_entry.attr,
	&blk_ia_range_nr_sectors_entry.attr,
	NULL,
};
ATTRIBUTE_GROUPS(blk_ia_range);

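/*
 * Generic show operation for independent access range attributes: resolve
 * the attribute entry and the range from the embedded kobject and call the
 * entry show method with the queue sysfs_lock held.
 */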
static ssize_t blk_ia_range_sysfs_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	struct blk_ia_range_sysfs_entry *entry =
		container_of(attr, struct blk_ia_range_sysfs_entry, attr);
	struct blk_independent_access_range *iar =
		container_of(kobj, struct blk_independent_access_range, kobj);
	ssize_t ret;

	mutex_lock(&iar->queue->sysfs_lock);
	ret = entry->show(iar, buf);
	mutex_unlock(&iar->queue->sysfs_lock);

	return ret;
}

static const struct sysfs_ops blk_ia_range_sysfs_ops = {
	.show	= blk_ia_range_sysfs_show,
};

/*
 * Independent access range entries are not freed individually, but together
 * with struct blk_independent_access_ranges and its array of ranges. Since
 * kobject_add() takes a reference on the parent kobject contained in
 * struct blk_independent_access_ranges, the array of independent access range
 * entries cannot be freed until kobject_del() is called for all entries.
 * So we do not need to do anything here, but still need this no-op release
 * operation to avoid complaints from the kobject code.
 */
static void blk_ia_range_sysfs_nop_release(struct kobject *kobj)
{
}

static struct kobj_type blk_ia_range_ktype = {
	.sysfs_ops	= &blk_ia_range_sysfs_ops,
	.default_groups	= blk_ia_range_groups,
	.release	= blk_ia_range_sysfs_nop_release,
};

/*
 * This will be executed only after all independent access range entries are
 * removed with kobject_del(), at which point, it is safe to free everything,
 * including the array of ranges.
 */
static void blk_ia_ranges_sysfs_release(struct kobject *kobj)
{
	struct blk_independent_access_ranges *iars =
		container_of(kobj, struct blk_independent_access_ranges, kobj);

	kfree(iars);
}

static struct kobj_type blk_ia_ranges_ktype = {
	.release	= blk_ia_ranges_sysfs_release,
};

/**
 * disk_register_independent_access_ranges - register with sysfs a set of
 *		independent access ranges
 * @disk:	Target disk
 * @new_iars:	New set of independent access ranges
 *
 * Register with sysfs a set of independent access ranges for @disk.
 * If @new_iars is not NULL, this set of ranges is registered and the old set
 * specified by q->ia_ranges is unregistered. Otherwise, q->ia_ranges is
 * registered if it is not already.
 */
int disk_register_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *new_iars)
{
	struct request_queue *q = disk->queue;
	struct blk_independent_access_ranges *iars;
	int i, ret;

	lockdep_assert_held(&q->sysfs_dir_lock);
	lockdep_assert_held(&q->sysfs_lock);

	/* If a new range set is specified, unregister the old one */
	if (new_iars) {
		if (q->ia_ranges)
			disk_unregister_independent_access_ranges(disk);
		q->ia_ranges = new_iars;
	}

	iars = q->ia_ranges;
	if (!iars)
		return 0;

	/*
	 * At this point, iars is the new set of independent access ranges
	 * that needs to be registered with sysfs.
	 */
	WARN_ON(iars->sysfs_registered);
	ret = kobject_init_and_add(&iars->kobj, &blk_ia_ranges_ktype,
				   &q->kobj, "%s", "independent_access_ranges");
	if (ret) {
		q->ia_ranges = NULL;
		kobject_put(&iars->kobj);
		return ret;
	}

	for (i = 0; i < iars->nr_ia_ranges; i++) {
		iars->ia_range[i].queue = q;
		ret = kobject_init_and_add(&iars->ia_range[i].kobj,
					   &blk_ia_range_ktype, &iars->kobj,
					   "%d", i);
		if (ret) {
			while (--i >= 0)
				kobject_del(&iars->ia_range[i].kobj);
			kobject_del(&iars->kobj);
			kobject_put(&iars->kobj);
			/*
			 * Dropping the last reference on iars->kobj frees
			 * iars, so clear the stale queue pointer.
			 */
			q->ia_ranges = NULL;
			return ret;
		}
	}

	iars->sysfs_registered = true;

	return 0;
}

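/*
 * Remove the independent access ranges of @disk from sysfs, if they were
 * registered, and detach them from the request queue. The range structure
 * itself is freed through the parent kobject release method once all range
 * kobjects have been deleted, or freed directly here if it was never
 * registered with sysfs.
 */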
void disk_unregister_independent_access_ranges(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_independent_access_ranges *iars = q->ia_ranges;
	int i;

	lockdep_assert_held(&q->sysfs_dir_lock);
	lockdep_assert_held(&q->sysfs_lock);

	if (!iars)
		return;

	if (iars->sysfs_registered) {
		for (i = 0; i < iars->nr_ia_ranges; i++)
			kobject_del(&iars->ia_range[i].kobj);
		kobject_del(&iars->kobj);
		kobject_put(&iars->kobj);
	} else {
		kfree(iars);
	}

	q->ia_ranges = NULL;
}

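/*
 * Return the independent access range containing the given sector, or NULL
 * if the sector does not fall within any of the ranges.
 */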
static struct blk_independent_access_range *
disk_find_ia_range(struct blk_independent_access_ranges *iars,
		  sector_t sector)
{
	struct blk_independent_access_range *iar;
	int i;

	for (i = 0; i < iars->nr_ia_ranges; i++) {
		iar = &iars->ia_range[i];
		if (sector >= iar->sector &&
		    sector < iar->sector + iar->nr_sectors)
			return iar;
	}

	return NULL;
}

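/*
 * Check that a set of independent access ranges covers the device capacity
 * exactly, without overlaps and without holes, sorting the ranges in
 * increasing start sector order in the process. Return true if the ranges
 * are valid, false otherwise.
 */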
static bool disk_check_ia_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *iars)
{
	struct blk_independent_access_range *iar, *tmp;
	sector_t capacity = get_capacity(disk);
	sector_t sector = 0;
	int i;

	/*
	 * While sorting the ranges in increasing LBA order, check that the
	 * ranges do not overlap, that there are no sector holes and that all
	 * sectors belong to one range.
	 */
	for (i = 0; i < iars->nr_ia_ranges; i++) {
		tmp = disk_find_ia_range(iars, sector);
		if (!tmp || tmp->sector != sector) {
			pr_warn("Invalid non-contiguous independent access ranges\n");
			return false;
		}

		iar = &iars->ia_range[i];
		if (tmp != iar) {
			swap(iar->sector, tmp->sector);
			swap(iar->nr_sectors, tmp->nr_sectors);
		}

		sector += iar->nr_sectors;
	}

	if (sector != capacity) {
		pr_warn("Independent access ranges do not match disk capacity\n");
		return false;
	}

	return true;
}

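/*
 * Compare a new set of independent access ranges with the set currently
 * attached to the disk request queue. Return true if they differ in the
 * number of ranges or in the start sector or size of any range.
 */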
static bool disk_ia_ranges_changed(struct gendisk *disk,
				   struct blk_independent_access_ranges *new)
{
	struct blk_independent_access_ranges *old = disk->queue->ia_ranges;
	int i;

	if (!old)
		return true;

	if (old->nr_ia_ranges != new->nr_ia_ranges)
		return true;

	for (i = 0; i < old->nr_ia_ranges; i++) {
		if (new->ia_range[i].sector != old->ia_range[i].sector ||
		    new->ia_range[i].nr_sectors != old->ia_range[i].nr_sectors)
			return true;
	}

	return false;
}

/**
 * disk_alloc_independent_access_ranges - Allocate an independent access ranges
 *                                        data structure
 * @disk:		target disk
 * @nr_ia_ranges:	Number of independent access ranges
 *
 * Allocate a struct blk_independent_access_ranges structure with @nr_ia_ranges
 * access range descriptors.
 */
struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges)
{
	struct blk_independent_access_ranges *iars;

	iars = kzalloc_node(struct_size(iars, ia_range, nr_ia_ranges),
			    GFP_KERNEL, disk->queue->node);
	if (iars)
		iars->nr_ia_ranges = nr_ia_ranges;
	return iars;
}
EXPORT_SYMBOL_GPL(disk_alloc_independent_access_ranges);

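/*
 * Typical usage, as an illustrative driver-side sketch only (this is not
 * part of the block layer and not taken from an actual driver): a driver
 * that detects two actuators, each serving half of the medium, could
 * describe them with two ranges covering the full capacity and hand them
 * over to the block layer:
 *
 *	struct blk_independent_access_ranges *iars;
 *	sector_t half = get_capacity(disk) / 2;
 *
 *	iars = disk_alloc_independent_access_ranges(disk, 2);
 *	if (!iars)
 *		return;
 *	iars->ia_range[0].sector = 0;
 *	iars->ia_range[0].nr_sectors = half;
 *	iars->ia_range[1].sector = half;
 *	iars->ia_range[1].nr_sectors = get_capacity(disk) - half;
 *	disk_set_independent_access_ranges(disk, iars);
 *
 * disk_set_independent_access_ranges() takes ownership of the allocated
 * structure, so the caller must not free it.
 */
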
/**
 * disk_set_independent_access_ranges - Set a disk's independent access ranges
 * @disk:	target disk
 * @iars:	independent access ranges structure
 *
 * Set the independent access ranges information of the request queue
 * of @disk to @iars. If @iars is NULL, the independent access ranges
 * structure already set, if any, is cleared. If there are no differences
 * between @iars and the independent access ranges structure already set,
 * @iars is freed.
 */
void disk_set_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *iars)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON_ONCE(iars && !iars->nr_ia_ranges)) {
		kfree(iars);
		iars = NULL;
	}

	mutex_lock(&q->sysfs_dir_lock);
	mutex_lock(&q->sysfs_lock);

	if (iars) {
		if (!disk_check_ia_ranges(disk, iars)) {
			kfree(iars);
			iars = NULL;
			goto reg;
		}

		if (!disk_ia_ranges_changed(disk, iars)) {
			kfree(iars);
			goto unlock;
		}
	}

	/*
	 * This may be called for a registered queue, e.g. during a device
	 * revalidation. If that is the case, we need to unregister the old
	 * set of independent access ranges and register the new set. If the
	 * queue is not registered, registration of the device request queue
	 * will register the independent access ranges, so only swap in the
	 * new set and free the old one.
	 */
reg:
	if (blk_queue_registered(q)) {
		disk_register_independent_access_ranges(disk, iars);
	} else {
		swap(q->ia_ranges, iars);
		kfree(iars);
	}

unlock:
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);
}
EXPORT_SYMBOL_GPL(disk_set_independent_access_ranges);
349