// SPDX-License-Identifier: GPL-2.0
/*
 *  Block device concurrent positioning ranges.
 *
 *  Copyright (C) 2021 Western Digital Corporation or its Affiliates.
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/init.h>

#include "blk.h"

static ssize_t
blk_ia_range_sector_show(struct blk_independent_access_range *iar,
			 char *buf)
{
	return sprintf(buf, "%llu\n", iar->sector);
}

static ssize_t
blk_ia_range_nr_sectors_show(struct blk_independent_access_range *iar,
			     char *buf)
{
	return sprintf(buf, "%llu\n", iar->nr_sectors);
}

struct blk_ia_range_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_independent_access_range *iar, char *buf);
};

static struct blk_ia_range_sysfs_entry blk_ia_range_sector_entry = {
	.attr = { .name = "sector", .mode = 0444 },
	.show = blk_ia_range_sector_show,
};

static struct blk_ia_range_sysfs_entry blk_ia_range_nr_sectors_entry = {
	.attr = { .name = "nr_sectors", .mode = 0444 },
	.show = blk_ia_range_nr_sectors_show,
};

static struct attribute *blk_ia_range_attrs[] = {
	&blk_ia_range_sector_entry.attr,
	&blk_ia_range_nr_sectors_entry.attr,
	NULL,
};
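/*
 * ATTRIBUTE_GROUPS(blk_ia_range) generates the blk_ia_range_groups attribute
 * group array from blk_ia_range_attrs; it is used as the default_groups of
 * the per-range kobject type below.
 */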
ATTRIBUTE_GROUPS(blk_ia_range);

static ssize_t blk_ia_range_sysfs_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	struct blk_ia_range_sysfs_entry *entry =
		container_of(attr, struct blk_ia_range_sysfs_entry, attr);
	struct blk_independent_access_range *iar =
		container_of(kobj, struct blk_independent_access_range, kobj);

	return entry->show(iar, buf);
}

static const struct sysfs_ops blk_ia_range_sysfs_ops = {
	.show	= blk_ia_range_sysfs_show,
};

/*
 * Independent access range entries are not freed individually, but altogether
 * with struct blk_independent_access_ranges and its array of ranges. Since
 * kobject_add() takes a reference on the parent kobject contained in
 * struct blk_independent_access_ranges, the array of independent access range
 * entries cannot be freed until kobject_del() is called for all entries.
 * So we do not need to do anything here, but still need this no-op release
 * operation to avoid complaints from the kobject code.
 */
static void blk_ia_range_sysfs_nop_release(struct kobject *kobj)
{
}

static struct kobj_type blk_ia_range_ktype = {
	.sysfs_ops	= &blk_ia_range_sysfs_ops,
	.default_groups	= blk_ia_range_groups,
	.release	= blk_ia_range_sysfs_nop_release,
};

/*
 * This will be executed only after all independent access range entries are
 * removed with kobject_del(), at which point, it is safe to free everything,
 * including the array of ranges.
 */
static void blk_ia_ranges_sysfs_release(struct kobject *kobj)
{
	struct blk_independent_access_ranges *iars =
		container_of(kobj, struct blk_independent_access_ranges, kobj);

	kfree(iars);
}

static struct kobj_type blk_ia_ranges_ktype = {
	.release	= blk_ia_ranges_sysfs_release,
};

/**
 * disk_register_independent_access_ranges - register with sysfs a set of
 *		independent access ranges
 * @disk:	Target disk
 *
 * Register with sysfs a set of independent access ranges for @disk.
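 *
 * The resulting hierarchy appears under the disk queue directory in sysfs,
 * for example (hypothetical device name, two ranges):
 *
 *   /sys/block/sda/queue/independent_access_ranges/0/sector
 *   /sys/block/sda/queue/independent_access_ranges/0/nr_sectors
 *   /sys/block/sda/queue/independent_access_ranges/1/sector
 *   /sys/block/sda/queue/independent_access_ranges/1/nr_sectors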
 */
int disk_register_independent_access_ranges(struct gendisk *disk)
{
	struct blk_independent_access_ranges *iars = disk->ia_ranges;
	struct request_queue *q = disk->queue;
	int i, ret;

	lockdep_assert_held(&q->sysfs_dir_lock);
	lockdep_assert_held(&q->sysfs_lock);

	if (!iars)
		return 0;

	/*
	 * At this point, iars is the new set of sector access ranges that needs
	 * to be registered with sysfs.
	 */
	WARN_ON(iars->sysfs_registered);
	ret = kobject_init_and_add(&iars->kobj, &blk_ia_ranges_ktype,
				   &q->kobj, "%s", "independent_access_ranges");
	if (ret) {
		disk->ia_ranges = NULL;
		kobject_put(&iars->kobj);
		return ret;
	}

	for (i = 0; i < iars->nr_ia_ranges; i++) {
		ret = kobject_init_and_add(&iars->ia_range[i].kobj,
					   &blk_ia_range_ktype, &iars->kobj,
					   "%d", i);
		if (ret) {
			while (--i >= 0)
				kobject_del(&iars->ia_range[i].kobj);
			kobject_del(&iars->kobj);
			kobject_put(&iars->kobj);
			return ret;
		}
	}

	iars->sysfs_registered = true;

	return 0;
}

void disk_unregister_independent_access_ranges(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_independent_access_ranges *iars = disk->ia_ranges;
	int i;

	lockdep_assert_held(&q->sysfs_dir_lock);
	lockdep_assert_held(&q->sysfs_lock);

	if (!iars)
		return;

	if (iars->sysfs_registered) {
		for (i = 0; i < iars->nr_ia_ranges; i++)
			kobject_del(&iars->ia_range[i].kobj);
		kobject_del(&iars->kobj);
		kobject_put(&iars->kobj);
	} else {
		kfree(iars);
	}

	disk->ia_ranges = NULL;
}

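/*
 * Return the range containing a given sector, or NULL if the sector is not
 * covered by any range. For example (illustrative values), with the two
 * ranges {.sector = 0, .nr_sectors = 1000} and {.sector = 1000,
 * .nr_sectors = 1000}, sector 999 resolves to the first range, sector 1000
 * to the second, and sector 2000 to NULL.
 */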
static struct blk_independent_access_range *
disk_find_ia_range(struct blk_independent_access_ranges *iars,
		  sector_t sector)
{
	struct blk_independent_access_range *iar;
	int i;

	for (i = 0; i < iars->nr_ia_ranges; i++) {
		iar = &iars->ia_range[i];
		if (sector >= iar->sector &&
		    sector < iar->sector + iar->nr_sectors)
			return iar;
	}

	return NULL;
}

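/*
 * Sort and validate a set of ranges: together, the ranges must cover the
 * disk capacity exactly, with no holes and no overlaps. For example
 * (illustrative values), on a 2000-sector disk, the ranges {0, 1000} and
 * {1000, 1000} are accepted in any order, while {0, 1000} + {1100, 900}
 * (hole at sectors 1000-1099) or {0, 1000} + {1000, 500} (short of the
 * capacity) are rejected.
 */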
static bool disk_check_ia_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *iars)
{
	struct blk_independent_access_range *iar, *tmp;
	sector_t capacity = get_capacity(disk);
	sector_t sector = 0;
	int i;

	if (WARN_ON_ONCE(!iars->nr_ia_ranges))
		return false;

	/*
	 * While sorting the ranges in increasing LBA order, check that the
	 * ranges do not overlap, that there are no sector holes and that all
	 * sectors belong to one range.
	 */
	for (i = 0; i < iars->nr_ia_ranges; i++) {
		tmp = disk_find_ia_range(iars, sector);
		if (!tmp || tmp->sector != sector) {
			pr_warn("Invalid non-contiguous independent access ranges\n");
			return false;
		}

		iar = &iars->ia_range[i];
		if (tmp != iar) {
			swap(iar->sector, tmp->sector);
			swap(iar->nr_sectors, tmp->nr_sectors);
		}

		sector += iar->nr_sectors;
	}

	if (sector != capacity) {
		pr_warn("Independent access ranges do not match disk capacity\n");
		return false;
	}

	return true;
}

static bool disk_ia_ranges_changed(struct gendisk *disk,
				   struct blk_independent_access_ranges *new)
{
	struct blk_independent_access_ranges *old = disk->ia_ranges;
	int i;

	if (!old)
		return true;

	if (old->nr_ia_ranges != new->nr_ia_ranges)
		return true;

	for (i = 0; i < old->nr_ia_ranges; i++) {
		if (new->ia_range[i].sector != old->ia_range[i].sector ||
		    new->ia_range[i].nr_sectors != old->ia_range[i].nr_sectors)
			return true;
	}

	return false;
}

/**
 * disk_alloc_independent_access_ranges - Allocate an independent access ranges
 *                                        data structure
 * @disk:		target disk
 * @nr_ia_ranges:	Number of independent access ranges
 *
 * Allocate a struct blk_independent_access_ranges structure with @nr_ia_ranges
 * access range descriptors.
 */
struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges)
{
	struct blk_independent_access_ranges *iars;

	iars = kzalloc_node(struct_size(iars, ia_range, nr_ia_ranges),
			    GFP_KERNEL, disk->queue->node);
	if (iars)
		iars->nr_ia_ranges = nr_ia_ranges;
	return iars;
}
EXPORT_SYMBOL_GPL(disk_alloc_independent_access_ranges);

/**
 * disk_set_independent_access_ranges - Set a disk's independent access ranges
 * @disk:	target disk
 * @iars:	independent access ranges structure
 *
 * Set the independent access ranges information of the request queue
 * of @disk to @iars. If @iars is NULL, the independent access ranges
 * structure already set is cleared. If there are no differences between
 * @iars and the independent access ranges structure already set, @iars
 * is freed.
 */
void disk_set_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *iars)
{
	struct request_queue *q = disk->queue;

	mutex_lock(&q->sysfs_dir_lock);
	mutex_lock(&q->sysfs_lock);
	if (iars && !disk_check_ia_ranges(disk, iars)) {
		kfree(iars);
		iars = NULL;
	}
	if (iars && !disk_ia_ranges_changed(disk, iars)) {
		kfree(iars);
		goto unlock;
	}

	/*
	 * This may be called for a registered queue, e.g. during a device
	 * revalidation. If that is the case, we need to unregister the old
	 * set of independent access ranges and register the new set. If the
	 * queue is not registered, registration of the device request queue
	 * will register the independent access ranges.
	 */
	disk_unregister_independent_access_ranges(disk);
	disk->ia_ranges = iars;
	if (blk_queue_registered(q))
		disk_register_independent_access_ranges(disk);
unlock:
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);
}
EXPORT_SYMBOL_GPL(disk_set_independent_access_ranges);
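
/*
 * Example usage (illustrative sketch only): a hypothetical driver describing
 * a device with two independent actuators, each covering half of the
 * capacity. Real drivers typically build the ranges from device-reported
 * range descriptors rather than splitting the capacity evenly.
 */
static void __maybe_unused example_set_two_ranges(struct gendisk *disk)
{
	struct blk_independent_access_ranges *iars;
	sector_t half = get_capacity(disk) >> 1;

	/* Allocate a descriptor array for two ranges. */
	iars = disk_alloc_independent_access_ranges(disk, 2);
	if (!iars)
		return;

	/* First half of the device. */
	iars->ia_range[0].sector = 0;
	iars->ia_range[0].nr_sectors = half;

	/* Second half, covering the remaining capacity (no hole, no overlap). */
	iars->ia_range[1].sector = half;
	iars->ia_range[1].nr_sectors = get_capacity(disk) - half;

	/*
	 * Validate the ranges and register them with sysfs. Ownership of
	 * iars is transferred: it is freed if invalid or unchanged.
	 */
	disk_set_independent_access_ranges(disk, iars);
}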