// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017-2018 Christoph Hellwig.
 */

#include <linux/backing-dev.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <trace/events/block.h>
#include "nvme.h"

bool multipath = true;
module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
	"turn on native support for multiple controllers per subsystem");

static const char *nvme_iopolicy_names[] = {
	[NVME_IOPOLICY_NUMA]	= "numa",
	[NVME_IOPOLICY_RR]	= "round-robin",
};

static int iopolicy = NVME_IOPOLICY_NUMA;

static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp)
{
	if (!val)
		return -EINVAL;
	if (!strncmp(val, "numa", 4))
		iopolicy = NVME_IOPOLICY_NUMA;
	else if (!strncmp(val, "round-robin", 11))
		iopolicy = NVME_IOPOLICY_RR;
	else
		return -EINVAL;

	return 0;
}

static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp)
{
	return sprintf(buf, "%s\n", nvme_iopolicy_names[iopolicy]);
}

module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy,
	&iopolicy, 0644);
MODULE_PARM_DESC(iopolicy,
	"Default multipath I/O policy; 'numa' (default) or 'round-robin'");

void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
{
	subsys->iopolicy = iopolicy;
}

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_mq_unfreeze_queue(h->disk->queue);
}

void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_mq_freeze_queue_wait(h->disk->queue);
}

void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_freeze_queue_start(h->disk->queue);
}

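/*
 * Called when a request dispatched through the multipath node failed with a
 * path or ANA error: take the bios off the failed request, point them back
 * at the multipath device and park them on the head's requeue list so they
 * can be retried on another path.
 */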
void nvme_failover_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	u16 status = nvme_req(req)->status & 0x7ff;
	unsigned long flags;
	struct bio *bio;

	nvme_mpath_clear_current_path(ns);

	/*
	 * If we got back an ANA error, we know the controller is alive but not
	 * ready to serve this namespace.  Kick off a re-read of the ANA
	 * information page, and just try any other available path for now.
	 */
	if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) {
		set_bit(NVME_NS_ANA_PENDING, &ns->flags);
		queue_work(nvme_wq, &ns->ctrl->ana_work);
	}

	spin_lock_irqsave(&ns->head->requeue_lock, flags);
	for (bio = req->bio; bio; bio = bio->bi_next) {
		bio_set_dev(bio, ns->head->disk->part0);
		if (bio->bi_opf & REQ_POLLED) {
			bio->bi_opf &= ~REQ_POLLED;
			bio->bi_cookie = BLK_QC_T_NONE;
		}
		/*
		 * The alternate request queue that we may end up submitting
		 * the bio to may be frozen temporarily, in which case
		 * REQ_NOWAIT would fail the I/O immediately with EAGAIN.
		 * The bio is resubmitted from the requeue work, which runs in
		 * process context and can block, unlike the original issuer.
		 * Clear the flag to avoid spurious EAGAIN I/O failures.
		 */
		bio->bi_opf &= ~REQ_NOWAIT;
	}
	blk_steal_bios(&ns->head->requeue_list, req);
	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);

	nvme_req(req)->status = 0;
	nvme_end_req(req);
	kblockd_schedule_work(&ns->head->requeue_work);
}

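/*
 * Account I/O statistics against the multipath node instead of the per-path
 * queue, so that usage shows up on the nvmeXnY device the bio was submitted
 * to.  Passthrough requests and queues with I/O accounting disabled are
 * skipped.
 */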
void nvme_mpath_start_request(struct request *rq)
{
	struct nvme_ns *ns = rq->q->queuedata;
	struct gendisk *disk = ns->head->disk;

	if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq))
		return;

	nvme_req(rq)->flags |= NVME_MPATH_IO_STATS;
	nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0, req_op(rq),
						      jiffies);
}
EXPORT_SYMBOL_GPL(nvme_mpath_start_request);

void nvme_mpath_end_request(struct request *rq)
{
	struct nvme_ns *ns = rq->q->queuedata;

	if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
		return;
	bdev_end_io_acct(ns->head->disk->part0, req_op(rq),
			 blk_rq_bytes(rq) >> SECTOR_SHIFT,
			 nvme_req(rq)->start_time);
}

void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (!ns->head->disk)
			continue;
		kblockd_schedule_work(&ns->head->requeue_work);
		if (ctrl->state == NVME_CTRL_LIVE)
			disk_uevent(ns->head->disk, KOBJ_CHANGE);
	}
	up_read(&ctrl->namespaces_rwsem);
}

static const char *nvme_ana_state_names[] = {
	[0]				= "invalid state",
	[NVME_ANA_OPTIMIZED]		= "optimized",
	[NVME_ANA_NONOPTIMIZED]		= "non-optimized",
	[NVME_ANA_INACCESSIBLE]		= "inaccessible",
	[NVME_ANA_PERSISTENT_LOSS]	= "persistent-loss",
	[NVME_ANA_CHANGE]		= "change",
};

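/*
 * Drop this namespace from the per-node current_path cache.  Returns true if
 * the namespace was the cached current path for any node.
 */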
bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;
	bool changed = false;
	int node;

	if (!head)
		goto out;

	for_each_node(node) {
		if (ns == rcu_access_pointer(head->current_path[node])) {
			rcu_assign_pointer(head->current_path[node], NULL);
			changed = true;
		}
	}
out:
	return changed;
}

void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		nvme_mpath_clear_current_path(ns);
		kblockd_schedule_work(&ns->head->requeue_work);
	}
	up_read(&ctrl->namespaces_rwsem);
}

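/*
 * A path reported a new capacity.  Mark every sibling path whose capacity
 * disagrees with the multipath node as not ready, invalidate the cached
 * current paths and kick the requeue work so queued bios pick a valid path.
 */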
void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;
	sector_t capacity = get_capacity(head->disk);
	int node;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&head->srcu);
	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (capacity != get_capacity(ns->disk))
			clear_bit(NVME_NS_READY, &ns->flags);
	}
	srcu_read_unlock(&head->srcu, srcu_idx);

	for_each_node(node)
		rcu_assign_pointer(head->current_path[node], NULL);
	kblockd_schedule_work(&head->requeue_work);
}

static bool nvme_path_is_disabled(struct nvme_ns *ns)
{
	/*
	 * We don't treat NVME_CTRL_DELETING as a disabled path as I/O should
	 * still be able to complete assuming that the controller is connected.
	 * Otherwise it will fail immediately and return to the requeue list.
	 */
	if (ns->ctrl->state != NVME_CTRL_LIVE &&
	    ns->ctrl->state != NVME_CTRL_DELETING)
		return true;
	if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
	    !test_bit(NVME_NS_READY, &ns->flags))
		return true;
	return false;
}

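/*
 * Pick the "closest" usable path and cache it for @node.  With the NUMA
 * policy the path with the smallest node distance to the submitting node
 * wins; ANA optimized paths are preferred over non-optimized ones, which are
 * only used as a fallback.
 */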
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
{
	int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
	struct nvme_ns *found = NULL, *fallback = NULL, *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (nvme_path_is_disabled(ns))
			continue;

		if (ns->ctrl->numa_node != NUMA_NO_NODE &&
		    READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
			distance = node_distance(node, ns->ctrl->numa_node);
		else
			distance = LOCAL_DISTANCE;

		switch (ns->ana_state) {
		case NVME_ANA_OPTIMIZED:
			if (distance < found_distance) {
				found_distance = distance;
				found = ns;
			}
			break;
		case NVME_ANA_NONOPTIMIZED:
			if (distance < fallback_distance) {
				fallback_distance = distance;
				fallback = ns;
			}
			break;
		default:
			break;
		}
	}

	if (!found)
		found = fallback;
	if (found)
		rcu_assign_pointer(head->current_path[node], found);
	return found;
}

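/*
 * Return the path after @ns in the head's sibling list, wrapping around to
 * the first entry when @ns is the last one.  Returns NULL only if the list
 * is empty.
 */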
static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
		struct nvme_ns *ns)
{
	ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns,
			siblings);
	if (ns)
		return ns;
	return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);
}

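/*
 * Round-robin policy: starting from the path after @old, pick the next
 * enabled path, preferring an optimized one but remembering the last usable
 * non-optimized path as a fallback.  The cached path for @node is updated so
 * consecutive bios rotate across the available paths.
 */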
static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
		int node, struct nvme_ns *old)
{
	struct nvme_ns *ns, *found = NULL;

	if (list_is_singular(&head->list)) {
		if (nvme_path_is_disabled(old))
			return NULL;
		return old;
	}

	for (ns = nvme_next_ns(head, old);
	     ns && ns != old;
	     ns = nvme_next_ns(head, ns)) {
		if (nvme_path_is_disabled(ns))
			continue;

		if (ns->ana_state == NVME_ANA_OPTIMIZED) {
			found = ns;
			goto out;
		}
		if (ns->ana_state == NVME_ANA_NONOPTIMIZED)
			found = ns;
	}

	/*
	 * The loop above skips the current path for round-robin semantics.
	 * Fall back to the current path if either:
	 *  - no other optimized path found and current is optimized,
	 *  - no other usable path found and current is usable.
	 */
	if (!nvme_path_is_disabled(old) &&
	    (old->ana_state == NVME_ANA_OPTIMIZED ||
	     (!found && old->ana_state == NVME_ANA_NONOPTIMIZED)))
		return old;

	if (!found)
		return NULL;
out:
	rcu_assign_pointer(head->current_path[node], found);
	return found;
}

static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
{
	return ns->ctrl->state == NVME_CTRL_LIVE &&
		ns->ana_state == NVME_ANA_OPTIMIZED;
}

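/*
 * Fast path for bio submission: use the cached path for the local node if it
 * is still optimized, otherwise (or for the round-robin policy) fall back to
 * a full path selection.  Must be called under head->srcu.
 */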
inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
	int node = numa_node_id();
	struct nvme_ns *ns;

	ns = srcu_dereference(head->current_path[node], &head->srcu);
	if (unlikely(!ns))
		return __nvme_find_path(head, node);

	if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR)
		return nvme_round_robin_path(head, node, ns);
	if (unlikely(!nvme_path_is_optimized(ns)))
		return __nvme_find_path(head, node);
	return ns;
}

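/*
 * Check whether any path could still become usable: a controller that is
 * live, resetting or (re)connecting and has not had its failfast timeout
 * expire (NVME_CTRL_FAILFAST_EXPIRED) counts as available, so bios are
 * requeued instead of failed.
 */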
static bool nvme_available_path(struct nvme_ns_head *head)
{
	struct nvme_ns *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
			continue;
		switch (ns->ctrl->state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			return true;
		default:
			break;
		}
	}
	return false;
}

static void nvme_ns_head_submit_bio(struct bio *bio)
{
	struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data;
	struct device *dev = disk_to_dev(head->disk);
	struct nvme_ns *ns;
	int srcu_idx;

	/*
	 * The namespace might be going away and the bio might be moved to a
	 * different queue via blk_steal_bios(), so we need to use the bio_split
	 * pool from the original queue to allocate the bvecs from.
	 */
	bio = bio_split_to_limits(bio);
	if (!bio)
		return;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (likely(ns)) {
		bio_set_dev(bio, ns->disk->part0);
		bio->bi_opf |= REQ_NVME_MPATH;
		trace_block_bio_remap(bio, disk_devt(ns->head->disk),
				      bio->bi_iter.bi_sector);
		submit_bio_noacct(bio);
	} else if (nvme_available_path(head)) {
		dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");

		spin_lock_irq(&head->requeue_lock);
		bio_list_add(&head->requeue_list, bio);
		spin_unlock_irq(&head->requeue_lock);
	} else {
		dev_warn_ratelimited(dev, "no available path - failing I/O\n");

		bio_io_error(bio);
	}

	srcu_read_unlock(&head->srcu, srcu_idx);
}

static int nvme_ns_head_open(struct gendisk *disk, blk_mode_t mode)
{
	if (!nvme_tryget_ns_head(disk->private_data))
		return -ENXIO;
	return 0;
}

static void nvme_ns_head_release(struct gendisk *disk)
{
	nvme_put_ns_head(disk->private_data);
}

#ifdef CONFIG_BLK_DEV_ZONED
static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nvme_ns_head *head = disk->private_data;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (ns)
		ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data);
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}
#else
#define nvme_ns_head_report_zones	NULL
#endif /* CONFIG_BLK_DEV_ZONED */

const struct block_device_operations nvme_ns_head_ops = {
	.owner		= THIS_MODULE,
	.submit_bio	= nvme_ns_head_submit_bio,
	.open		= nvme_ns_head_open,
	.release	= nvme_ns_head_release,
	.ioctl		= nvme_ns_head_ioctl,
	.compat_ioctl	= blkdev_compat_ptr_ioctl,
	.getgeo		= nvme_getgeo,
	.report_zones	= nvme_ns_head_report_zones,
	.pr_ops		= &nvme_pr_ops,
};

static inline struct nvme_ns_head *cdev_to_ns_head(struct cdev *cdev)
{
	return container_of(cdev, struct nvme_ns_head, cdev);
}

static int nvme_ns_head_chr_open(struct inode *inode, struct file *file)
{
	if (!nvme_tryget_ns_head(cdev_to_ns_head(inode->i_cdev)))
		return -ENXIO;
	return 0;
}

static int nvme_ns_head_chr_release(struct inode *inode, struct file *file)
{
	nvme_put_ns_head(cdev_to_ns_head(inode->i_cdev));
	return 0;
}

static const struct file_operations nvme_ns_head_chr_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_ns_head_chr_open,
	.release	= nvme_ns_head_chr_release,
	.unlocked_ioctl	= nvme_ns_head_chr_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.uring_cmd	= nvme_ns_head_chr_uring_cmd,
	.uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll,
};

static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
{
	int ret;

	head->cdev_device.parent = &head->subsys->dev;
	ret = dev_set_name(&head->cdev_device, "ng%dn%d",
			   head->subsys->instance, head->instance);
	if (ret)
		return ret;
	ret = nvme_cdev_add(&head->cdev, &head->cdev_device,
			    &nvme_ns_head_chr_fops, THIS_MODULE);
	return ret;
}

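/*
 * Resubmit all bios that failover or path changes parked on the requeue
 * list.  submit_bio_noacct() goes through nvme_ns_head_submit_bio() again,
 * which either picks a fresh path or requeues/fails the bio.
 */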
static void nvme_requeue_work(struct work_struct *work)
{
	struct nvme_ns_head *head =
		container_of(work, struct nvme_ns_head, requeue_work);
	struct bio *bio, *next;

	spin_lock_irq(&head->requeue_lock);
	next = bio_list_get(&head->requeue_list);
	spin_unlock_irq(&head->requeue_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_next;
		bio->bi_next = NULL;

		submit_bio_noacct(bio);
	}
}

int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
	bool vwc = false;

	mutex_init(&head->lock);
	bio_list_init(&head->requeue_list);
	spin_lock_init(&head->requeue_lock);
	INIT_WORK(&head->requeue_work, nvme_requeue_work);

	/*
	 * Add a multipath node if the subsystem supports multiple controllers.
	 * We also do this for private namespaces as the namespace sharing flag
	 * could change after a rescan.
	 */
	if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
	    !nvme_is_unique_nsid(ctrl, head) || !multipath)
		return 0;

	head->disk = blk_alloc_disk(ctrl->numa_node);
	if (!head->disk)
		return -ENOMEM;
	head->disk->fops = &nvme_ns_head_ops;
	head->disk->private_data = head;
	sprintf(head->disk->disk_name, "nvme%dn%d",
			ctrl->subsys->instance, head->instance);

	blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue);
	blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue);
	blk_queue_flag_set(QUEUE_FLAG_IO_STAT, head->disk->queue);
	/*
	 * This assumes all controllers that refer to a namespace either
	 * support poll queues or not.  That is not a strict guarantee,
	 * but if the assumption is wrong the effect is only suboptimal
	 * performance, not a correctness problem.
	 */
	if (ctrl->tagset->nr_maps > HCTX_TYPE_POLL &&
	    ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues)
		blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue);

	/* set to a default value of 512 until the disk is validated */
	blk_queue_logical_block_size(head->disk->queue, 512);
	blk_set_stacking_limits(&head->disk->queue->limits);
	blk_queue_dma_alignment(head->disk->queue, 3);

	/* we need to propagate up the VWC setting */
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(head->disk->queue, vwc, vwc);
	return 0;
}

static void nvme_mpath_set_live(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;
	int rc;

	if (!head->disk)
		return;

	/*
	 * test_and_set_bit() is used because it is protecting against two nvme
	 * paths simultaneously calling device_add_disk() on the same namespace
	 * head.
	 */
	if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
		rc = device_add_disk(&head->subsys->dev, head->disk,
				     nvme_ns_id_attr_groups);
		if (rc) {
			clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags);
			return;
		}
		nvme_add_ns_head_cdev(head);
	}

	mutex_lock(&head->lock);
	if (nvme_path_is_optimized(ns)) {
		int node, srcu_idx;

		srcu_idx = srcu_read_lock(&head->srcu);
		for_each_node(node)
			__nvme_find_path(head, node);
		srcu_read_unlock(&head->srcu, srcu_idx);
	}
	mutex_unlock(&head->lock);

	synchronize_srcu(&head->srcu);
	kblockd_schedule_work(&head->requeue_work);
}

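/*
 * Walk the ANA log page in ctrl->ana_log_buf and invoke @cb for each group
 * descriptor, validating offsets and descriptor contents against the log
 * size advertised by the controller.  Must be called with ana_lock held.
 */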
static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
		int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
			  void *))
{
	void *base = ctrl->ana_log_buf;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr);
	int error, i;

	lockdep_assert_held(&ctrl->ana_lock);

	for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
		struct nvme_ana_group_desc *desc = base + offset;
		u32 nr_nsids;
		size_t nsid_buf_size;

		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
			return -EINVAL;

		nr_nsids = le32_to_cpu(desc->nnsids);
		nsid_buf_size = flex_array_size(desc, nsids, nr_nsids);

		if (WARN_ON_ONCE(desc->grpid == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE))
			return -EINVAL;

		offset += sizeof(*desc);
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
			return -EINVAL;

		error = cb(ctrl, desc, data);
		if (error)
			return error;

		offset += nsid_buf_size;
	}

	return 0;
}

static inline bool nvme_state_is_live(enum nvme_ana_state state)
{
	return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED;
}

static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
		struct nvme_ns *ns)
{
	ns->ana_grpid = le32_to_cpu(desc->grpid);
	ns->ana_state = desc->state;
	clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
	/*
	 * nvme_mpath_set_live() will trigger I/O to the multipath path device
	 * and in turn to this path device.  However we cannot accept this I/O
	 * if the controller is not live.  This may deadlock if called from
	 * nvme_mpath_init_identify() and the ctrl will never complete
	 * initialization, preventing I/O from completing.  For this case we
	 * will reprocess the ANA log page in nvme_mpath_update() once the
	 * controller is ready.
	 */
	if (nvme_state_is_live(ns->ana_state) &&
	    ns->ctrl->state == NVME_CTRL_LIVE)
		nvme_mpath_set_live(ns);
}

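/*
 * nvme_parse_ana_log() callback: apply one group descriptor to the
 * controller's namespaces.  Both the descriptor's NSID array and the
 * ctrl->namespaces list are sorted by NSID, so a single merge-style walk
 * over both is enough to match them up.
 */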
static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
	unsigned *nr_change_groups = data;
	struct nvme_ns *ns;

	dev_dbg(ctrl->device, "ANA group %d: %s.\n",
			le32_to_cpu(desc->grpid),
			nvme_ana_state_names[desc->state]);

	if (desc->state == NVME_ANA_CHANGE)
		(*nr_change_groups)++;

	if (!nr_nsids)
		return 0;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		unsigned nsid;
again:
		nsid = le32_to_cpu(desc->nsids[n]);
		if (ns->head->ns_id < nsid)
			continue;
		if (ns->head->ns_id == nsid)
			nvme_update_ns_ana_state(desc, ns);
		if (++n == nr_nsids)
			break;
		if (ns->head->ns_id > nsid)
			goto again;
	}
	up_read(&ctrl->namespaces_rwsem);
	return 0;
}

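/*
 * Fetch the ANA log page from the controller and apply it.  If any group is
 * still in the "change" state, (re)arm the ANATT timer so the controller is
 * reset if the transition never completes.
 */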
static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
{
	u32 nr_change_groups = 0;
	int error;

	mutex_lock(&ctrl->ana_lock);
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0, NVME_CSI_NVM,
			ctrl->ana_log_buf, ctrl->ana_log_size, 0);
	if (error) {
		dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
		goto out_unlock;
	}

	error = nvme_parse_ana_log(ctrl, &nr_change_groups,
				   nvme_update_ana_state);
	if (error)
		goto out_unlock;

	/*
	 * In theory we should have an ANATT timer per group as they might
	 * enter the change state at different times.  But that is a lot of
	 * overhead just to protect against a target that keeps entering new
	 * change states while never finishing previous ones.  We'll still
	 * eventually time out once all groups are in change state, so this
	 * isn't a big deal.
	 *
	 * We also double the ANATT value to provide some slack for transports
	 * or AEN processing overhead.
	 */
	if (nr_change_groups)
		mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies);
	else
		del_timer_sync(&ctrl->anatt_timer);
out_unlock:
	mutex_unlock(&ctrl->ana_lock);
	return error;
}

static void nvme_ana_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);

	if (ctrl->state != NVME_CTRL_LIVE)
		return;

	nvme_read_ana_log(ctrl);
}

void nvme_mpath_update(struct nvme_ctrl *ctrl)
{
	u32 nr_change_groups = 0;

	if (!ctrl->ana_log_buf)
		return;

	mutex_lock(&ctrl->ana_lock);
	nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
	mutex_unlock(&ctrl->ana_lock);
}

static void nvme_anatt_timeout(struct timer_list *t)
{
	struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);

	dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
	nvme_reset_ctrl(ctrl);
}

void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
	if (!nvme_ctrl_use_ana(ctrl))
		return;
	del_timer_sync(&ctrl->anatt_timer);
	cancel_work_sync(&ctrl->ana_work);
}

#define SUBSYS_ATTR_RW(_name, _mode, _show, _store)  \
	struct device_attribute subsys_attr_##_name =	\
		__ATTR(_name, _mode, _show, _store)

static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return sysfs_emit(buf, "%s\n",
			  nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
}

static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
		if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
			WRITE_ONCE(subsys->iopolicy, i);
			return count;
		}
	}

	return -EINVAL;
}
SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
		      nvme_subsys_iopolicy_show, nvme_subsys_iopolicy_store);

static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
}
DEVICE_ATTR_RO(ana_grpid);

static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
}
DEVICE_ATTR_RO(ana_state);

static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	struct nvme_ana_group_desc *dst = data;

	if (desc->grpid != dst->grpid)
		return 0;

	*dst = *desc;
	return -ENXIO; /* just break out of the loop */
}

void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
	if (nvme_ctrl_use_ana(ns->ctrl)) {
		struct nvme_ana_group_desc desc = {
			.grpid = anagrpid,
			.state = 0,
		};

		mutex_lock(&ns->ctrl->ana_lock);
		ns->ana_grpid = le32_to_cpu(anagrpid);
		nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
		mutex_unlock(&ns->ctrl->ana_lock);
		if (desc.state) {
			/* found the group desc: update */
			nvme_update_ns_ana_state(&desc, ns);
		} else {
			/* group desc not found: trigger a re-read */
			set_bit(NVME_NS_ANA_PENDING, &ns->flags);
			queue_work(nvme_wq, &ns->ctrl->ana_work);
		}
	} else {
		ns->ana_state = NVME_ANA_OPTIMIZED;
		nvme_mpath_set_live(ns);
	}

	if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
				   ns->head->disk->queue);
#ifdef CONFIG_BLK_DEV_ZONED
	if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
		ns->head->disk->nr_zones = ns->disk->nr_zones;
#endif
}

void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	kblockd_schedule_work(&head->requeue_work);
	if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
		nvme_cdev_del(&head->cdev, &head->cdev_device);
		del_gendisk(head->disk);
	}
}

void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	/* make sure all pending bios are cleaned up */
	kblockd_schedule_work(&head->requeue_work);
	flush_work(&head->requeue_work);
	put_disk(head->disk);
}

void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
	mutex_init(&ctrl->ana_lock);
	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
}

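/*
 * Parse the ANA related fields of the Identify Controller data, size and
 * (re)allocate the ANA log buffer, and do an initial read of the log page.
 * Returns 0 with ANA disabled (buffer freed) if the log would not fit into
 * a single MDTS-sized transfer.
 */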
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;
	size_t ana_log_size;
	int error = 0;

	/* check if multipath is enabled and we have the capability */
	if (!multipath || !ctrl->subsys ||
	    !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA))
		return 0;

	if (!ctrl->max_namespaces ||
	    ctrl->max_namespaces > le32_to_cpu(id->nn)) {
		dev_err(ctrl->device,
			"Invalid MNAN value %u\n", ctrl->max_namespaces);
		return -EINVAL;
	}

	ctrl->anacap = id->anacap;
	ctrl->anatt = id->anatt;
	ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
	ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);

	ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
		ctrl->max_namespaces * sizeof(__le32);
	if (ana_log_size > max_transfer_size) {
		dev_err(ctrl->device,
			"ANA log page size (%zd) larger than MDTS (%zd).\n",
			ana_log_size, max_transfer_size);
		dev_err(ctrl->device, "disabling ANA support.\n");
		goto out_uninit;
	}
	if (ana_log_size > ctrl->ana_log_size) {
		nvme_mpath_stop(ctrl);
		nvme_mpath_uninit(ctrl);
		ctrl->ana_log_buf = kvmalloc(ana_log_size, GFP_KERNEL);
		if (!ctrl->ana_log_buf)
			return -ENOMEM;
	}
	ctrl->ana_log_size = ana_log_size;
	error = nvme_read_ana_log(ctrl);
	if (error)
		goto out_uninit;
	return 0;

out_uninit:
	nvme_mpath_uninit(ctrl);
	return error;
}

void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
	kvfree(ctrl->ana_log_buf);
	ctrl->ana_log_buf = NULL;
	ctrl->ana_log_size = 0;
}
967