xref: /openbmc/linux/drivers/nvme/host/multipath.c (revision 943126417891372d56aa3fe46295cbf53db31370)
/*
 * Copyright (c) 2017-2018 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/moduleparam.h>
#include <trace/events/block.h>
#include "nvme.h"

static bool multipath = true;
module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
	"turn on native support for multiple controllers per subsystem");

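/*
 * Native multipathing is only used when the multipath module parameter is
 * set and the controller reports ANA support (CMIC bit 3).
 */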
inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return multipath && ctrl->subsys && (ctrl->subsys->cmic & (1 << 3));
}

/*
 * If multipathing is enabled, we always need to use the subsystem instance
 * number for numbering our devices to avoid conflicts between subsystems that
 * have multiple controllers and thus use the multipath-aware subsystem node
 * and those that have a single controller and use the controller node
 * directly.
 */
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
			struct nvme_ctrl *ctrl, int *flags)
{
	if (!multipath) {
		sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
	} else if (ns->head->disk) {
		sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
				ctrl->cntlid, ns->head->instance);
		*flags = GENHD_FL_HIDDEN;
	} else {
		sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
				ns->head->instance);
	}
}

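/*
 * Failover handling for a request that was submitted through the multipath
 * node: steal its bios onto the head's requeue list, complete the original
 * request, and decide how to recover based on the NVMe status code.
 */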
void nvme_failover_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	u16 status = nvme_req(req)->status;
	unsigned long flags;

	spin_lock_irqsave(&ns->head->requeue_lock, flags);
	blk_steal_bios(&ns->head->requeue_list, req);
	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
	blk_mq_end_request(req, 0);

	switch (status & 0x7ff) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		/*
		 * If we got back an ANA error we know the controller is alive,
		 * but not ready to serve this namespace.  The spec suggests
		 * we should update our general state here, but because the
		 * admin and I/O queues are not serialized, doing so is
		 * fundamentally racy.  So instead just clear the current path,
		 * mark the path as pending and kick off a re-read of the ANA
		 * log page ASAP.
		 */
		nvme_mpath_clear_current_path(ns);
		if (ns->ctrl->ana_log_buf) {
			set_bit(NVME_NS_ANA_PENDING, &ns->flags);
			queue_work(nvme_wq, &ns->ctrl->ana_work);
		}
		break;
	case NVME_SC_HOST_PATH_ERROR:
		/*
		 * Temporary transport disruption in talking to the controller.
		 * Try to send on a new path.
		 */
		nvme_mpath_clear_current_path(ns);
		break;
	default:
		/*
		 * Reset the controller for any non-ANA error as we don't know
		 * what caused the error.
		 */
		nvme_reset_ctrl(ns->ctrl);
		break;
	}

	kblockd_schedule_work(&ns->head->requeue_work);
}

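/*
 * Schedule the requeue work for every multipath-capable namespace head on
 * this controller so that bios parked on the requeue lists get resubmitted.
 */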
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->disk)
			kblockd_schedule_work(&ns->head->requeue_work);
	}
	up_read(&ctrl->namespaces_rwsem);
}

static const char *nvme_ana_state_names[] = {
	[0]				= "invalid state",
	[NVME_ANA_OPTIMIZED]		= "optimized",
	[NVME_ANA_NONOPTIMIZED]		= "non-optimized",
	[NVME_ANA_INACCESSIBLE]		= "inaccessible",
	[NVME_ANA_PERSISTENT_LOSS]	= "persistent-loss",
	[NVME_ANA_CHANGE]		= "change",
};

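/*
 * Drop this namespace from the cached current_path entries of its head so
 * that the next submission on any node has to pick a path again.
 */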
void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;
	int node;

	if (!head)
		return;

	for_each_node(node) {
		if (ns == rcu_access_pointer(head->current_path[node]))
			rcu_assign_pointer(head->current_path[node], NULL);
	}
}

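/*
 * Pick the best path for the given NUMA node: prefer the ANA-optimized
 * namespace on a live controller with the smallest node distance, fall back
 * to the closest non-optimized one, and cache the result in current_path.
 */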
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
{
	int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
	struct nvme_ns *found = NULL, *fallback = NULL, *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (ns->ctrl->state != NVME_CTRL_LIVE ||
		    test_bit(NVME_NS_ANA_PENDING, &ns->flags))
			continue;

		distance = node_distance(node, dev_to_node(ns->ctrl->dev));

		switch (ns->ana_state) {
		case NVME_ANA_OPTIMIZED:
			if (distance < found_distance) {
				found_distance = distance;
				found = ns;
			}
			break;
		case NVME_ANA_NONOPTIMIZED:
			if (distance < fallback_distance) {
				fallback_distance = distance;
				fallback = ns;
			}
			break;
		default:
			break;
		}
	}

	if (!found)
		found = fallback;
	if (found)
		rcu_assign_pointer(head->current_path[node], found);
	return found;
}

static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
{
	return ns->ctrl->state == NVME_CTRL_LIVE &&
		ns->ana_state == NVME_ANA_OPTIMIZED;
}

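/*
 * Fast-path lookup: use the cached path for the local NUMA node if it is
 * still live and optimized, otherwise do a full path search.
 */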
inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
	int node = numa_node_id();
	struct nvme_ns *ns;

	ns = srcu_dereference(head->current_path[node], &head->srcu);
	if (unlikely(!ns || !nvme_path_is_optimized(ns)))
		ns = __nvme_find_path(head, node);
	return ns;
}

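/*
 * make_request handler for the multipath node: remap the bio to the selected
 * path and submit it directly.  If no path is usable but paths still exist,
 * park the bio on the requeue list; if the head has no paths at all, fail it.
 */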
static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
		struct bio *bio)
{
	struct nvme_ns_head *head = q->queuedata;
	struct device *dev = disk_to_dev(head->disk);
	struct nvme_ns *ns;
	blk_qc_t ret = BLK_QC_T_NONE;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (likely(ns)) {
		bio->bi_disk = ns->disk;
		bio->bi_opf |= REQ_NVME_MPATH;
		trace_block_bio_remap(bio->bi_disk->queue, bio,
				      disk_devt(ns->head->disk),
				      bio->bi_iter.bi_sector);
		ret = direct_make_request(bio);
	} else if (!list_empty_careful(&head->list)) {
		dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");

		spin_lock_irq(&head->requeue_lock);
		bio_list_add(&head->requeue_list, bio);
		spin_unlock_irq(&head->requeue_lock);
	} else {
		dev_warn_ratelimited(dev, "no path - failing I/O\n");

		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
	}

	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

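/*
 * Poll the currently cached path for the local node, but only if it is still
 * live and optimized; otherwise report that nothing was found.
 */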
static bool nvme_ns_head_poll(struct request_queue *q, blk_qc_t qc)
{
	struct nvme_ns_head *head = q->queuedata;
	struct nvme_ns *ns;
	bool found = false;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = srcu_dereference(head->current_path[numa_node_id()], &head->srcu);
	if (likely(ns && nvme_path_is_optimized(ns)))
		found = ns->queue->poll_fn(q, qc);
	srcu_read_unlock(&head->srcu, srcu_idx);
	return found;
}

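/*
 * Resubmit all bios parked on the requeue list through the multipath node so
 * that path selection happens again.
 */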
static void nvme_requeue_work(struct work_struct *work)
{
	struct nvme_ns_head *head =
		container_of(work, struct nvme_ns_head, requeue_work);
	struct bio *bio, *next;

	spin_lock_irq(&head->requeue_lock);
	next = bio_list_get(&head->requeue_list);
	spin_unlock_irq(&head->requeue_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_next;
		bio->bi_next = NULL;

		/*
		 * Reset disk to the mpath node and resubmit to select a new
		 * path.
		 */
		bio->bi_disk = head->disk;
		generic_make_request(bio);
	}
}

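/*
 * Set up the per-head requeue machinery and, if the subsystem can have
 * multiple controllers, allocate the bio-based request queue and gendisk
 * that form the multipath node (the disk itself is only added later in
 * nvme_mpath_set_live()).
 */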
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
	struct request_queue *q;
	bool vwc = false;

	mutex_init(&head->lock);
	bio_list_init(&head->requeue_list);
	spin_lock_init(&head->requeue_lock);
	INIT_WORK(&head->requeue_work, nvme_requeue_work);

	/*
	 * Add a multipath node if the subsystem supports multiple controllers.
	 * We also do this for private namespaces as the namespace sharing data
	 * could change after a rescan.
	 */
	if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath)
		return 0;

	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
	if (!q)
		goto out;
	q->queuedata = head;
	blk_queue_make_request(q, nvme_ns_head_make_request);
	q->poll_fn = nvme_ns_head_poll;
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	/* set to a default value of 512 until the disk is validated */
	blk_queue_logical_block_size(q, 512);

	/* we need to propagate up the VWC setting */
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);

	head->disk = alloc_disk(0);
	if (!head->disk)
		goto out_cleanup_queue;
	head->disk->fops = &nvme_ns_head_ops;
	head->disk->private_data = head;
	head->disk->queue = q;
	head->disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(head->disk->disk_name, "nvme%dn%d",
			ctrl->subsys->instance, head->instance);
	return 0;

out_cleanup_queue:
	blk_cleanup_queue(q);
out:
	return -ENOMEM;
}

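/*
 * A path to this head has become usable: register the multipath gendisk if
 * it is not up yet, pre-populate the per-node path cache when the new path
 * is optimized, and kick the requeue work to resubmit any parked bios.
 */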
static void nvme_mpath_set_live(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	lockdep_assert_held(&ns->head->lock);

	if (!head->disk)
		return;

	if (!(head->disk->flags & GENHD_FL_UP))
		device_add_disk(&head->subsys->dev, head->disk,
				nvme_ns_id_attr_groups);

	if (nvme_path_is_optimized(ns)) {
		int node, srcu_idx;

		srcu_idx = srcu_read_lock(&head->srcu);
		for_each_node(node)
			__nvme_find_path(head, node);
		srcu_read_unlock(&head->srcu, srcu_idx);
	}

	kblockd_schedule_work(&ns->head->requeue_work);
}

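/*
 * Walk the ANA log buffer, validate each group descriptor against the
 * controller limits and the log size, and invoke the callback for every
 * descriptor.
 */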
static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
		int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
			void *))
{
	void *base = ctrl->ana_log_buf;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr);
	int error, i;

	lockdep_assert_held(&ctrl->ana_lock);

	for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
		struct nvme_ana_group_desc *desc = base + offset;
		u32 nr_nsids = le32_to_cpu(desc->nnsids);
		size_t nsid_buf_size = nr_nsids * sizeof(__le32);

		if (WARN_ON_ONCE(desc->grpid == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE))
			return -EINVAL;

		offset += sizeof(*desc);
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
			return -EINVAL;

		error = cb(ctrl, desc, data);
		if (error)
			return error;

		offset += nsid_buf_size;
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
			return -EINVAL;
	}

	return 0;
}

static inline bool nvme_state_is_live(enum nvme_ana_state state)
{
	return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED;
}

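/*
 * Apply the state from an ANA group descriptor to a namespace and, if the
 * namespace just transitioned to a live state, bring the multipath disk up.
 */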
static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
		struct nvme_ns *ns)
{
	enum nvme_ana_state old;

	mutex_lock(&ns->head->lock);
	old = ns->ana_state;
	ns->ana_grpid = le32_to_cpu(desc->grpid);
	ns->ana_state = desc->state;
	clear_bit(NVME_NS_ANA_PENDING, &ns->flags);

	if (nvme_state_is_live(ns->ana_state) && !nvme_state_is_live(old))
		nvme_mpath_set_live(ns);
	mutex_unlock(&ns->head->lock);
}

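/*
 * nvme_parse_ana_log() callback for nvme_read_ana_log(): count groups in the
 * change state and update every namespace listed in the descriptor.
 */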
static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
	unsigned *nr_change_groups = data;
	struct nvme_ns *ns;

	dev_info(ctrl->device, "ANA group %d: %s.\n",
			le32_to_cpu(desc->grpid),
			nvme_ana_state_names[desc->state]);

	if (desc->state == NVME_ANA_CHANGE)
		(*nr_change_groups)++;

	if (!nr_nsids)
		return 0;

	down_write(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->ns_id != le32_to_cpu(desc->nsids[n]))
			continue;
		nvme_update_ns_ana_state(desc, ns);
		if (++n == nr_nsids)
			break;
	}
	up_write(&ctrl->namespaces_rwsem);
	WARN_ON_ONCE(n < nr_nsids);
	return 0;
}

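/*
 * Fetch the ANA log page from the controller and apply it.  While any group
 * is in the change state, arm the ANATT timer so a stuck transition
 * eventually triggers a controller reset.
 */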
static int nvme_read_ana_log(struct nvme_ctrl *ctrl, bool groups_only)
{
	u32 nr_change_groups = 0;
	int error;

	mutex_lock(&ctrl->ana_lock);
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA,
			groups_only ? NVME_ANA_LOG_RGO : 0,
			ctrl->ana_log_buf, ctrl->ana_log_size, 0);
	if (error) {
		dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
		goto out_unlock;
	}

	error = nvme_parse_ana_log(ctrl, &nr_change_groups,
			nvme_update_ana_state);
	if (error)
		goto out_unlock;

	/*
	 * In theory we should have an ANATT timer per group as they might
	 * enter the change state at different times.  But that is a lot of
	 * overhead just to protect against a target that keeps entering new
	 * change states while never finishing previous ones.  We'll still
	 * eventually time out once all groups are in change state, so this
	 * isn't a big deal.
	 *
	 * We also double the ANATT value to provide some slack for transports
	 * or AEN processing overhead.
	 */
	if (nr_change_groups)
		mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies);
	else
		del_timer_sync(&ctrl->anatt_timer);
out_unlock:
	mutex_unlock(&ctrl->ana_lock);
	return error;
}

static void nvme_ana_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);

	nvme_read_ana_log(ctrl, false);
}

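/*
 * The ANA transition time expired without all groups leaving the change
 * state; reset the controller to get back to a sane state.
 */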
static void nvme_anatt_timeout(struct timer_list *t)
{
	struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);

	dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
	nvme_reset_ctrl(ctrl);
}

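/*
 * Stop ANA processing for this controller: kill the ANATT timer and wait for
 * any in-flight ANA log work to finish.
 */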
void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
	if (!nvme_ctrl_use_ana(ctrl))
		return;
	del_timer_sync(&ctrl->anatt_timer);
	cancel_work_sync(&ctrl->ana_work);
}

static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
}
DEVICE_ATTR_RO(ana_grpid);

static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	return sprintf(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
}
DEVICE_ATTR_RO(ana_state);

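/*
 * nvme_parse_ana_log() callback used when adding a single namespace: apply
 * the state of the matching ANA group and stop the walk early by returning
 * an error that the caller ignores.
 */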
static int nvme_set_ns_ana_state(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	struct nvme_ns *ns = data;

	if (ns->ana_grpid == le32_to_cpu(desc->grpid)) {
		nvme_update_ns_ana_state(desc, ns);
		return -ENXIO; /* just break out of the loop */
	}

	return 0;
}

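/*
 * Called when a namespace is added: either derive its initial state from the
 * cached ANA log, or, without ANA, mark it optimized and bring the multipath
 * disk live right away.
 */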
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	if (nvme_ctrl_use_ana(ns->ctrl)) {
		mutex_lock(&ns->ctrl->ana_lock);
		ns->ana_grpid = le32_to_cpu(id->anagrpid);
		nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state);
		mutex_unlock(&ns->ctrl->ana_lock);
	} else {
		mutex_lock(&ns->head->lock);
		ns->ana_state = NVME_ANA_OPTIMIZED;
		nvme_mpath_set_live(ns);
		mutex_unlock(&ns->head->lock);
	}
}

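/*
 * Tear down the multipath gendisk and its queue once the head goes away,
 * flushing the requeue work so no parked bios are left behind.
 */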
void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	if (head->disk->flags & GENHD_FL_UP)
		del_gendisk(head->disk);
	blk_set_queue_dying(head->disk->queue);
	/* make sure all pending bios are cleaned up */
	kblockd_schedule_work(&head->requeue_work);
	flush_work(&head->requeue_work);
	blk_cleanup_queue(head->disk->queue);
	put_disk(head->disk);
}

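/*
 * Per-controller ANA setup: cache the ANA parameters from Identify
 * Controller, size and allocate the ANA log buffer, and do an initial
 * groups-only read of the log.  ANA support is disabled if the log would not
 * fit into a single MDTS-sized transfer.
 */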
int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	int error;

	if (!nvme_ctrl_use_ana(ctrl))
		return 0;

	ctrl->anacap = id->anacap;
	ctrl->anatt = id->anatt;
	ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
	ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);

	mutex_init(&ctrl->ana_lock);
	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
	ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
	if (!(ctrl->anacap & (1 << 6)))
		ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);

	if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
		dev_err(ctrl->device,
			"ANA log page size (%zd) larger than MDTS (%d).\n",
			ctrl->ana_log_size,
			ctrl->max_hw_sectors << SECTOR_SHIFT);
		dev_err(ctrl->device, "disabling ANA support.\n");
		return 0;
	}

	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
	ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
	if (!ctrl->ana_log_buf) {
		error = -ENOMEM;
		goto out;
	}

	error = nvme_read_ana_log(ctrl, true);
	if (error)
		goto out_free_ana_log_buf;
	return 0;
out_free_ana_log_buf:
	kfree(ctrl->ana_log_buf);
out:
	return error;
}

void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
	kfree(ctrl->ana_log_buf);
}