xref: /openbmc/linux/drivers/nvme/host/multipath.c (revision c94b731d)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017-2018 Christoph Hellwig.
 */

#include <linux/backing-dev.h>
#include <linux/moduleparam.h>
#include <trace/events/block.h>
#include "nvme.h"

static bool multipath = true;
module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
	"turn on native support for multiple controllers per subsystem");

static const char *nvme_iopolicy_names[] = {
	[NVME_IOPOLICY_NUMA]	= "numa",
	[NVME_IOPOLICY_RR]	= "round-robin",
};

static int iopolicy = NVME_IOPOLICY_NUMA;

static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp)
{
	if (!val)
		return -EINVAL;
	if (!strncmp(val, "numa", 4))
		iopolicy = NVME_IOPOLICY_NUMA;
	else if (!strncmp(val, "round-robin", 11))
		iopolicy = NVME_IOPOLICY_RR;
	else
		return -EINVAL;

	return 0;
}

static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp)
{
	return sprintf(buf, "%s\n", nvme_iopolicy_names[iopolicy]);
}

module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy,
	&iopolicy, 0644);
MODULE_PARM_DESC(iopolicy,
	"Default multipath I/O policy; 'numa' (default) or 'round-robin'");
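
/*
 * Note: since this file is built into nvme-core, the parameter above is
 * normally exposed as /sys/module/nvme_core/parameters/iopolicy and only
 * provides the default.  Each subsystem can be switched at runtime through
 * the iopolicy sysfs attribute defined further down in this file.
 */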

void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
{
	subsys->iopolicy = iopolicy;
}

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_mq_unfreeze_queue(h->disk->queue);
}

void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_mq_freeze_queue_wait(h->disk->queue);
}

void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_freeze_queue_start(h->disk->queue);
}

/*
 * If multipathing is enabled we need to always use the subsystem instance
 * number for numbering our devices to avoid conflicts between subsystems that
 * have multiple controllers and thus use the multipath-aware subsystem node
 * and those that have a single controller and use the controller node
 * directly.
 */
bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name, int *flags)
{
	if (!multipath)
		return false;
	if (!ns->head->disk) {
		sprintf(disk_name, "nvme%dn%d", ns->ctrl->subsys->instance,
			ns->head->instance);
		return true;
	}
	sprintf(disk_name, "nvme%dc%dn%d", ns->ctrl->subsys->instance,
		ns->ctrl->instance, ns->head->instance);
	*flags = GENHD_FL_HIDDEN;
	return true;
}

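/*
 * Fail over a request that completed with a path related error: drop the
 * cached path, steal its bios onto the ns_head requeue list, complete the
 * original request, and kick the requeue worker so the bios are retried on
 * another path (an ANA error additionally triggers a re-read of the ANA log).
 */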
void nvme_failover_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	u16 status = nvme_req(req)->status & 0x7ff;
	unsigned long flags;
	struct bio *bio;

	nvme_mpath_clear_current_path(ns);

	/*
	 * If we got back an ANA error, we know the controller is alive but not
	 * ready to serve this namespace.  Kick off a re-read of the ANA
	 * information page, and just try any other available path for now.
	 */
	if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) {
		set_bit(NVME_NS_ANA_PENDING, &ns->flags);
		queue_work(nvme_wq, &ns->ctrl->ana_work);
	}

	spin_lock_irqsave(&ns->head->requeue_lock, flags);
	for (bio = req->bio; bio; bio = bio->bi_next) {
		bio_set_dev(bio, ns->head->disk->part0);
		if (bio->bi_opf & REQ_POLLED) {
			bio->bi_opf &= ~REQ_POLLED;
			bio->bi_cookie = BLK_QC_T_NONE;
		}
	}
	blk_steal_bios(&ns->head->requeue_list, req);
	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);

	blk_mq_end_request(req, 0);
	kblockd_schedule_work(&ns->head->requeue_work);
}

void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (!ns->head->disk)
			continue;
		kblockd_schedule_work(&ns->head->requeue_work);
		if (ctrl->state == NVME_CTRL_LIVE)
			disk_uevent(ns->head->disk, KOBJ_CHANGE);
	}
	up_read(&ctrl->namespaces_rwsem);
}

static const char *nvme_ana_state_names[] = {
	[0]				= "invalid state",
	[NVME_ANA_OPTIMIZED]		= "optimized",
	[NVME_ANA_NONOPTIMIZED]		= "non-optimized",
	[NVME_ANA_INACCESSIBLE]		= "inaccessible",
	[NVME_ANA_PERSISTENT_LOSS]	= "persistent-loss",
	[NVME_ANA_CHANGE]		= "change",
};

bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;
	bool changed = false;
	int node;

	if (!head)
		goto out;

	for_each_node(node) {
		if (ns == rcu_access_pointer(head->current_path[node])) {
			rcu_assign_pointer(head->current_path[node], NULL);
			changed = true;
		}
	}
out:
	return changed;
}

void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		nvme_mpath_clear_current_path(ns);
		kblockd_schedule_work(&ns->head->requeue_work);
	}
	up_read(&ctrl->namespaces_rwsem);
}

void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;
	sector_t capacity = get_capacity(head->disk);
	int node;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (capacity != get_capacity(ns->disk))
			clear_bit(NVME_NS_READY, &ns->flags);
	}

	for_each_node(node)
		rcu_assign_pointer(head->current_path[node], NULL);
}

static bool nvme_path_is_disabled(struct nvme_ns *ns)
{
	/*
	 * We don't treat NVME_CTRL_DELETING as a disabled path as I/O should
	 * still be able to complete assuming that the controller is connected.
	 * Otherwise it will fail immediately and return to the requeue list.
	 */
	if (ns->ctrl->state != NVME_CTRL_LIVE &&
	    ns->ctrl->state != NVME_CTRL_DELETING)
		return true;
	if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
	    !test_bit(NVME_NS_READY, &ns->flags))
		return true;
	return false;
}

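/*
 * (Re)select the path to use for the given NUMA node: prefer the
 * ANA-optimized path with the smallest NUMA distance and fall back to a
 * non-optimized one, then cache the result in head->current_path[node].
 * With the round-robin policy all paths are treated as equidistant.
 */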
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
{
	int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
	struct nvme_ns *found = NULL, *fallback = NULL, *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (nvme_path_is_disabled(ns))
			continue;

		if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
			distance = node_distance(node, ns->ctrl->numa_node);
		else
			distance = LOCAL_DISTANCE;

		switch (ns->ana_state) {
		case NVME_ANA_OPTIMIZED:
			if (distance < found_distance) {
				found_distance = distance;
				found = ns;
			}
			break;
		case NVME_ANA_NONOPTIMIZED:
			if (distance < fallback_distance) {
				fallback_distance = distance;
				fallback = ns;
			}
			break;
		default:
			break;
		}
	}

	if (!found)
		found = fallback;
	if (found)
		rcu_assign_pointer(head->current_path[node], found);
	return found;
}

static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
		struct nvme_ns *ns)
{
	ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns,
			siblings);
	if (ns)
		return ns;
	return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);
}

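/*
 * Round-robin selection: starting from the previously used path, walk the
 * sibling list once and pick the next usable path, preferring optimized
 * over non-optimized ANA states; the previous path is only reused under
 * the conditions spelled out in the comment before the fallback check.
 */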
static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
		int node, struct nvme_ns *old)
{
	struct nvme_ns *ns, *found = NULL;

	if (list_is_singular(&head->list)) {
		if (nvme_path_is_disabled(old))
			return NULL;
		return old;
	}

	for (ns = nvme_next_ns(head, old);
	     ns && ns != old;
	     ns = nvme_next_ns(head, ns)) {
		if (nvme_path_is_disabled(ns))
			continue;

		if (ns->ana_state == NVME_ANA_OPTIMIZED) {
			found = ns;
			goto out;
		}
		if (ns->ana_state == NVME_ANA_NONOPTIMIZED)
			found = ns;
	}

	/*
	 * The loop above skips the current path for round-robin semantics.
	 * Fall back to the current path if either:
	 *  - no other optimized path found and current is optimized,
	 *  - no other usable path found and current is usable.
	 */
	if (!nvme_path_is_disabled(old) &&
	    (old->ana_state == NVME_ANA_OPTIMIZED ||
	     (!found && old->ana_state == NVME_ANA_NONOPTIMIZED)))
		return old;

	if (!found)
		return NULL;
out:
	rcu_assign_pointer(head->current_path[node], found);
	return found;
}

static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
{
	return ns->ctrl->state == NVME_CTRL_LIVE &&
		ns->ana_state == NVME_ANA_OPTIMIZED;
}

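/*
 * Look up the path to use for an I/O on the multipath node: reuse the path
 * cached for the local NUMA node when it is still optimized, otherwise
 * re-run path selection.  Callers must hold head->srcu via srcu_read_lock().
 */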
inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
	int node = numa_node_id();
	struct nvme_ns *ns;

	ns = srcu_dereference(head->current_path[node], &head->srcu);
	if (unlikely(!ns))
		return __nvme_find_path(head, node);

	if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR)
		return nvme_round_robin_path(head, node, ns);
	if (unlikely(!nvme_path_is_optimized(ns)))
		return __nvme_find_path(head, node);
	return ns;
}

static bool nvme_available_path(struct nvme_ns_head *head)
{
	struct nvme_ns *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
			continue;
		switch (ns->ctrl->state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			/*
			 * A controller in any of these states may still
			 * become usable, so keep I/O queued instead of
			 * failing it.
			 */
			return true;
		default:
			break;
		}
	}
	return false;
}

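/*
 * submit_bio handler for the multipath node: pick a path under SRCU and
 * remap the bio to that controller's per-path namespace.  If no path is
 * usable right now the bio is either parked on the requeue list (some
 * controller may still come back) or failed outright.
 */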
static void nvme_ns_head_submit_bio(struct bio *bio)
{
	struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data;
	struct device *dev = disk_to_dev(head->disk);
	struct nvme_ns *ns;
	int srcu_idx;

	/*
	 * The namespace might be going away and the bio might be moved to a
	 * different queue via blk_steal_bios(), so we need to use the bio_split
	 * pool from the original queue to allocate the bvecs from.
	 */
	blk_queue_split(&bio);

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (likely(ns)) {
		bio_set_dev(bio, ns->disk->part0);
		bio->bi_opf |= REQ_NVME_MPATH;
		trace_block_bio_remap(bio, disk_devt(ns->head->disk),
				      bio->bi_iter.bi_sector);
		submit_bio_noacct(bio);
	} else if (nvme_available_path(head)) {
		dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");

		spin_lock_irq(&head->requeue_lock);
		bio_list_add(&head->requeue_list, bio);
		spin_unlock_irq(&head->requeue_lock);
	} else {
		dev_warn_ratelimited(dev, "no available path - failing I/O\n");

		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
	}

	srcu_read_unlock(&head->srcu, srcu_idx);
}

static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
{
	if (!nvme_tryget_ns_head(bdev->bd_disk->private_data))
		return -ENXIO;
	return 0;
}

static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
{
	nvme_put_ns_head(disk->private_data);
}

#ifdef CONFIG_BLK_DEV_ZONED
static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nvme_ns_head *head = disk->private_data;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (ns)
		ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data);
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}
#else
#define nvme_ns_head_report_zones	NULL
#endif /* CONFIG_BLK_DEV_ZONED */

const struct block_device_operations nvme_ns_head_ops = {
	.owner		= THIS_MODULE,
	.submit_bio	= nvme_ns_head_submit_bio,
	.open		= nvme_ns_head_open,
	.release	= nvme_ns_head_release,
	.ioctl		= nvme_ns_head_ioctl,
	.getgeo		= nvme_getgeo,
	.report_zones	= nvme_ns_head_report_zones,
	.pr_ops		= &nvme_pr_ops,
};

static inline struct nvme_ns_head *cdev_to_ns_head(struct cdev *cdev)
{
	return container_of(cdev, struct nvme_ns_head, cdev);
}

static int nvme_ns_head_chr_open(struct inode *inode, struct file *file)
{
	if (!nvme_tryget_ns_head(cdev_to_ns_head(inode->i_cdev)))
		return -ENXIO;
	return 0;
}

static int nvme_ns_head_chr_release(struct inode *inode, struct file *file)
{
	nvme_put_ns_head(cdev_to_ns_head(inode->i_cdev));
	return 0;
}

static const struct file_operations nvme_ns_head_chr_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_ns_head_chr_open,
	.release	= nvme_ns_head_chr_release,
	.unlocked_ioctl	= nvme_ns_head_chr_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};

static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
{
	int ret;

	head->cdev_device.parent = &head->subsys->dev;
	ret = dev_set_name(&head->cdev_device, "ng%dn%d",
			   head->subsys->instance, head->instance);
	if (ret)
		return ret;
	ret = nvme_cdev_add(&head->cdev, &head->cdev_device,
			    &nvme_ns_head_chr_fops, THIS_MODULE);
	return ret;
}

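/*
 * Worker that resubmits the bios parked on the ns_head requeue list once a
 * path may be available again; each bio goes back through
 * nvme_ns_head_submit_bio() for fresh path selection.
 */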
static void nvme_requeue_work(struct work_struct *work)
{
	struct nvme_ns_head *head =
		container_of(work, struct nvme_ns_head, requeue_work);
	struct bio *bio, *next;

	spin_lock_irq(&head->requeue_lock);
	next = bio_list_get(&head->requeue_list);
	spin_unlock_irq(&head->requeue_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_next;
		bio->bi_next = NULL;

		submit_bio_noacct(bio);
	}
}

int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
	bool vwc = false;

	mutex_init(&head->lock);
	bio_list_init(&head->requeue_list);
	spin_lock_init(&head->requeue_lock);
	INIT_WORK(&head->requeue_work, nvme_requeue_work);

	/*
	 * Add a multipath node if the subsystem supports multiple controllers.
	 * We also do this for private namespaces as the namespace sharing
	 * information could change after a rescan.
	 */
	if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !multipath)
		return 0;

	head->disk = blk_alloc_disk(ctrl->numa_node);
	if (!head->disk)
		return -ENOMEM;
	head->disk->fops = &nvme_ns_head_ops;
	head->disk->private_data = head;
	sprintf(head->disk->disk_name, "nvme%dn%d",
			ctrl->subsys->instance, head->instance);

	blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue);
	blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue);
	/*
	 * This assumes all controllers that refer to a namespace either
	 * support poll queues or not.  That is not a strict guarantee, but
	 * if the assumption is wrong the effect is only suboptimal
	 * performance, not a correctness problem.
	 */
	if (ctrl->tagset->nr_maps > HCTX_TYPE_POLL &&
	    ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues)
		blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue);

	/* set to a default value of 512 until the disk is validated */
	blk_queue_logical_block_size(head->disk->queue, 512);
	blk_set_stacking_limits(&head->disk->queue->limits);

	/* we need to propagate up the VWC settings */
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(head->disk->queue, vwc, vwc);
	return 0;
}

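/*
 * Called once a path reaches a live ANA state: register the shared gendisk
 * and its ng char device on first use, pre-populate the per-node path cache
 * if this path is optimized, and kick the requeue list so any parked I/O
 * gets another chance.
 */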
static void nvme_mpath_set_live(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;
	int rc;

	if (!head->disk)
		return;

	/*
	 * test_and_set_bit() is used because it is protecting against two nvme
	 * paths simultaneously calling device_add_disk() on the same namespace
	 * head.
	 */
	if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
		rc = device_add_disk(&head->subsys->dev, head->disk,
				     nvme_ns_id_attr_groups);
		if (rc) {
			clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags);
			return;
		}
		nvme_add_ns_head_cdev(head);
	}

	mutex_lock(&head->lock);
	if (nvme_path_is_optimized(ns)) {
		int node, srcu_idx;

		srcu_idx = srcu_read_lock(&head->srcu);
		for_each_node(node)
			__nvme_find_path(head, node);
		srcu_read_unlock(&head->srcu, srcu_idx);
	}
	mutex_unlock(&head->lock);

	synchronize_srcu(&head->srcu);
	kblockd_schedule_work(&head->requeue_work);
}

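/*
 * Walk the ANA log page that was read into ctrl->ana_log_buf: a response
 * header followed by ngrps group descriptors, each immediately followed by
 * its array of nnsids namespace IDs.  @cb is invoked once per descriptor
 * with @data passed through, and a non-zero return stops the walk.
 */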
static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
		int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
			void *))
{
	void *base = ctrl->ana_log_buf;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr);
	int error, i;

	lockdep_assert_held(&ctrl->ana_lock);

	for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
		struct nvme_ana_group_desc *desc = base + offset;
		u32 nr_nsids;
		size_t nsid_buf_size;

		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
			return -EINVAL;

		nr_nsids = le32_to_cpu(desc->nnsids);
		nsid_buf_size = flex_array_size(desc, nsids, nr_nsids);

		if (WARN_ON_ONCE(desc->grpid == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE))
			return -EINVAL;

		offset += sizeof(*desc);
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
			return -EINVAL;

		error = cb(ctrl, desc, data);
		if (error)
			return error;

		offset += nsid_buf_size;
	}

	return 0;
}

static inline bool nvme_state_is_live(enum nvme_ana_state state)
{
	return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED;
}

static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
		struct nvme_ns *ns)
{
	ns->ana_grpid = le32_to_cpu(desc->grpid);
	ns->ana_state = desc->state;
	clear_bit(NVME_NS_ANA_PENDING, &ns->flags);

	if (nvme_state_is_live(ns->ana_state))
		nvme_mpath_set_live(ns);
}

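/*
 * Per-descriptor callback for nvme_read_ana_log(): apply the group's state
 * to every namespace of this controller that appears in the descriptor.
 * The merge-style walk below relies on both ctrl->namespaces and the
 * descriptor's NSID array being sorted in ascending NSID order.
 */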
static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
	unsigned *nr_change_groups = data;
	struct nvme_ns *ns;

	dev_dbg(ctrl->device, "ANA group %d: %s.\n",
			le32_to_cpu(desc->grpid),
			nvme_ana_state_names[desc->state]);

	if (desc->state == NVME_ANA_CHANGE)
		(*nr_change_groups)++;

	if (!nr_nsids)
		return 0;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		unsigned nsid;
again:
		nsid = le32_to_cpu(desc->nsids[n]);
		if (ns->head->ns_id < nsid)
			continue;
		if (ns->head->ns_id == nsid)
			nvme_update_ns_ana_state(desc, ns);
		if (++n == nr_nsids)
			break;
		if (ns->head->ns_id > nsid)
			goto again;
	}
	up_read(&ctrl->namespaces_rwsem);
	return 0;
}

static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
{
	u32 nr_change_groups = 0;
	int error;

	mutex_lock(&ctrl->ana_lock);
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0, NVME_CSI_NVM,
			ctrl->ana_log_buf, ctrl->ana_log_size, 0);
	if (error) {
		dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
		goto out_unlock;
	}

	error = nvme_parse_ana_log(ctrl, &nr_change_groups,
			nvme_update_ana_state);
	if (error)
		goto out_unlock;

	/*
	 * In theory we should have an ANATT timer per group as they might
	 * enter the change state at different times.  But that is a lot of
	 * overhead just to protect against a target that keeps entering new
	 * change states while never finishing previous ones.  We'll still
	 * eventually time out once all groups are in change state, so this
	 * isn't a big deal.
	 *
	 * We also double the ANATT value to provide some slack for transports
	 * or AEN processing overhead.
	 */
	if (nr_change_groups)
		mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies);
	else
		del_timer_sync(&ctrl->anatt_timer);
out_unlock:
	mutex_unlock(&ctrl->ana_lock);
	return error;
}

static void nvme_ana_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);

	if (ctrl->state != NVME_CTRL_LIVE)
		return;

	nvme_read_ana_log(ctrl);
}

static void nvme_anatt_timeout(struct timer_list *t)
{
	struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);

	dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
	nvme_reset_ctrl(ctrl);
}

void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
	if (!nvme_ctrl_use_ana(ctrl))
		return;
	del_timer_sync(&ctrl->anatt_timer);
	cancel_work_sync(&ctrl->ana_work);
}

#define SUBSYS_ATTR_RW(_name, _mode, _show, _store)  \
	struct device_attribute subsys_attr_##_name =	\
		__ATTR(_name, _mode, _show, _store)

static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return sysfs_emit(buf, "%s\n",
			  nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
}

static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
		if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
			WRITE_ONCE(subsys->iopolicy, i);
			return count;
		}
	}

	return -EINVAL;
}
SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
		      nvme_subsys_iopolicy_show, nvme_subsys_iopolicy_store);

static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
}
DEVICE_ATTR_RO(ana_grpid);

static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
}
DEVICE_ATTR_RO(ana_state);

static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	struct nvme_ana_group_desc *dst = data;

	if (desc->grpid != dst->grpid)
		return 0;

	*dst = *desc;
	return -ENXIO; /* just break out of the loop */
}

void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	if (nvme_ctrl_use_ana(ns->ctrl)) {
		struct nvme_ana_group_desc desc = {
			.grpid = id->anagrpid,
			.state = 0,
		};

		mutex_lock(&ns->ctrl->ana_lock);
		ns->ana_grpid = le32_to_cpu(id->anagrpid);
		nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
		mutex_unlock(&ns->ctrl->ana_lock);
		if (desc.state) {
			/* found the group desc: update */
			nvme_update_ns_ana_state(&desc, ns);
		} else {
			/* group desc not found: trigger a re-read */
			set_bit(NVME_NS_ANA_PENDING, &ns->flags);
			queue_work(nvme_wq, &ns->ctrl->ana_work);
		}
	} else {
		ns->ana_state = NVME_ANA_OPTIMIZED;
		nvme_mpath_set_live(ns);
	}

	if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
				   ns->head->disk->queue);
#ifdef CONFIG_BLK_DEV_ZONED
	if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
		ns->head->disk->queue->nr_zones = ns->queue->nr_zones;
#endif
}

void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	kblockd_schedule_work(&head->requeue_work);
	if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
		nvme_cdev_del(&head->cdev, &head->cdev_device);
		del_gendisk(head->disk);
	}
}

void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	blk_mark_disk_dead(head->disk);
	/* make sure all pending bios are cleaned up */
	kblockd_schedule_work(&head->requeue_work);
	flush_work(&head->requeue_work);
	blk_cleanup_disk(head->disk);
}

void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
	mutex_init(&ctrl->ana_lock);
	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
}

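/*
 * Set up ANA state from the Identify Controller data: the log buffer is
 * sized for the worst case (every group descriptor plus MNAN namespace IDs)
 * and must fit within MDTS so it can be fetched in a single Get Log Page
 * command.
 */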
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;
	size_t ana_log_size;
	int error = 0;

	/* check if multipath is enabled and we have the capability */
	if (!multipath || !ctrl->subsys ||
	    !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA))
		return 0;

	if (!ctrl->max_namespaces ||
	    ctrl->max_namespaces > le32_to_cpu(id->nn)) {
		dev_err(ctrl->device,
			"Invalid MNAN value %u\n", ctrl->max_namespaces);
		return -EINVAL;
	}

	ctrl->anacap = id->anacap;
	ctrl->anatt = id->anatt;
	ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
	ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);

	ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
		ctrl->max_namespaces * sizeof(__le32);
	if (ana_log_size > max_transfer_size) {
		dev_err(ctrl->device,
			"ANA log page size (%zd) larger than MDTS (%zd).\n",
			ana_log_size, max_transfer_size);
		dev_err(ctrl->device, "disabling ANA support.\n");
		goto out_uninit;
	}
	if (ana_log_size > ctrl->ana_log_size) {
		nvme_mpath_stop(ctrl);
		nvme_mpath_uninit(ctrl);
		ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
		if (!ctrl->ana_log_buf)
			return -ENOMEM;
	}
	ctrl->ana_log_size = ana_log_size;
	error = nvme_read_ana_log(ctrl);
	if (error)
		goto out_uninit;
	return 0;

out_uninit:
	nvme_mpath_uninit(ctrl);
	return error;
}

void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
	kfree(ctrl->ana_log_buf);
	ctrl->ana_log_buf = NULL;
	ctrl->ana_log_size = 0;
}