xref: /openbmc/linux/drivers/nvme/host/core.c (revision 103881e6)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVM Express device driver
4  * Copyright (c) 2011-2014, Intel Corporation.
5  */
6 
7 #include <linux/blkdev.h>
8 #include <linux/blk-mq.h>
9 #include <linux/blk-integrity.h>
10 #include <linux/compat.h>
11 #include <linux/delay.h>
12 #include <linux/errno.h>
13 #include <linux/hdreg.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/backing-dev.h>
17 #include <linux/slab.h>
18 #include <linux/types.h>
19 #include <linux/pr.h>
20 #include <linux/ptrace.h>
21 #include <linux/nvme_ioctl.h>
22 #include <linux/pm_qos.h>
23 #include <asm/unaligned.h>
24 
25 #include "nvme.h"
26 #include "fabrics.h"
27 #include <linux/nvme-auth.h>
28 
29 #define CREATE_TRACE_POINTS
30 #include "trace.h"
31 
32 #define NVME_MINORS		(1U << MINORBITS)
33 
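/*
 * Namespace information gathered from the various Identify data structures
 * while scanning a controller, used to decide whether and how to set up the
 * namespace.
 */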
34 struct nvme_ns_info {
35 	struct nvme_ns_ids ids;
36 	u32 nsid;
37 	__le32 anagrpid;
38 	bool is_shared;
39 	bool is_readonly;
40 	bool is_ready;
41 	bool is_removed;
42 };
43 
44 unsigned int admin_timeout = 60;
45 module_param(admin_timeout, uint, 0644);
46 MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
47 EXPORT_SYMBOL_GPL(admin_timeout);
48 
49 unsigned int nvme_io_timeout = 30;
50 module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
51 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
52 EXPORT_SYMBOL_GPL(nvme_io_timeout);
53 
54 static unsigned char shutdown_timeout = 5;
55 module_param(shutdown_timeout, byte, 0644);
56 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
57 
58 static u8 nvme_max_retries = 5;
59 module_param_named(max_retries, nvme_max_retries, byte, 0644);
60 MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
61 
62 static unsigned long default_ps_max_latency_us = 100000;
63 module_param(default_ps_max_latency_us, ulong, 0644);
64 MODULE_PARM_DESC(default_ps_max_latency_us,
65 		 "max power saving latency for new devices; use PM QOS to change per device");
66 
67 static bool force_apst;
68 module_param(force_apst, bool, 0644);
69 MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");
70 
71 static unsigned long apst_primary_timeout_ms = 100;
72 module_param(apst_primary_timeout_ms, ulong, 0644);
73 MODULE_PARM_DESC(apst_primary_timeout_ms,
74 	"primary APST timeout in ms");
75 
76 static unsigned long apst_secondary_timeout_ms = 2000;
77 module_param(apst_secondary_timeout_ms, ulong, 0644);
78 MODULE_PARM_DESC(apst_secondary_timeout_ms,
79 	"secondary APST timeout in ms");
80 
81 static unsigned long apst_primary_latency_tol_us = 15000;
82 module_param(apst_primary_latency_tol_us, ulong, 0644);
83 MODULE_PARM_DESC(apst_primary_latency_tol_us,
84 	"primary APST latency tolerance in us");
85 
86 static unsigned long apst_secondary_latency_tol_us = 100000;
87 module_param(apst_secondary_latency_tol_us, ulong, 0644);
88 MODULE_PARM_DESC(apst_secondary_latency_tol_us,
89 	"secondary APST latency tolerance in us");
90 
91 /*
92  * nvme_wq - hosts nvme related works that are not reset or delete
93  * nvme_reset_wq - hosts nvme reset works
94  * nvme_delete_wq - hosts nvme delete works
95  *
96  * nvme_wq will host works such as scan, aen handling, fw activation,
97  * keep-alive, periodic reconnects etc.  nvme_reset_wq runs reset works,
98  * which also flush works hosted on nvme_wq for serialization purposes.
99  * nvme_delete_wq hosts controller deletion works, which in turn flush
100  * reset works for serialization.
101  */
102 struct workqueue_struct *nvme_wq;
103 EXPORT_SYMBOL_GPL(nvme_wq);
104 
105 struct workqueue_struct *nvme_reset_wq;
106 EXPORT_SYMBOL_GPL(nvme_reset_wq);
107 
108 struct workqueue_struct *nvme_delete_wq;
109 EXPORT_SYMBOL_GPL(nvme_delete_wq);
110 
111 static LIST_HEAD(nvme_subsystems);
112 static DEFINE_MUTEX(nvme_subsystems_lock);
113 
114 static DEFINE_IDA(nvme_instance_ida);
115 static dev_t nvme_ctrl_base_chr_devt;
116 static struct class *nvme_class;
117 static struct class *nvme_subsys_class;
118 
119 static DEFINE_IDA(nvme_ns_chr_minor_ida);
120 static dev_t nvme_ns_chr_devt;
121 static struct class *nvme_ns_chr_class;
122 
123 static void nvme_put_subsystem(struct nvme_subsystem *subsys);
124 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
125 					   unsigned nsid);
126 static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
127 				   struct nvme_command *cmd);
128 
129 void nvme_queue_scan(struct nvme_ctrl *ctrl)
130 {
131 	/*
132 	 * Only queue new scan work when both the admin and I/O queues are alive
133 	 */
134 	if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE && ctrl->tagset)
135 		queue_work(nvme_wq, &ctrl->scan_work);
136 }
137 
138 /*
139  * Use this function to proceed with scheduling reset_work for a controller
140  * that had previously been set to the resetting state. This is intended for
141  * code paths that can't be interrupted by other reset attempts. A hot removal
142  * may prevent this from succeeding.
143  */
144 int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
145 {
146 	if (nvme_ctrl_state(ctrl) != NVME_CTRL_RESETTING)
147 		return -EBUSY;
148 	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
149 		return -EBUSY;
150 	return 0;
151 }
152 EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
153 
154 static void nvme_failfast_work(struct work_struct *work)
155 {
156 	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
157 			struct nvme_ctrl, failfast_work);
158 
159 	if (nvme_ctrl_state(ctrl) != NVME_CTRL_CONNECTING)
160 		return;
161 
162 	set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
163 	dev_info(ctrl->device, "failfast expired\n");
164 	nvme_kick_requeue_lists(ctrl);
165 }
166 
167 static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
168 {
169 	if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
170 		return;
171 
172 	schedule_delayed_work(&ctrl->failfast_work,
173 			      ctrl->opts->fast_io_fail_tmo * HZ);
174 }
175 
176 static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
177 {
178 	if (!ctrl->opts)
179 		return;
180 
181 	cancel_delayed_work_sync(&ctrl->failfast_work);
182 	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
183 }
184 
185 
186 int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
187 {
188 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
189 		return -EBUSY;
190 	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
191 		return -EBUSY;
192 	return 0;
193 }
194 EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
195 
196 int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
197 {
198 	int ret;
199 
200 	ret = nvme_reset_ctrl(ctrl);
201 	if (!ret) {
202 		flush_work(&ctrl->reset_work);
203 		if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
204 			ret = -ENETRESET;
205 	}
206 
207 	return ret;
208 }
209 
210 static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
211 {
212 	dev_info(ctrl->device,
213 		 "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl));
214 
215 	flush_work(&ctrl->reset_work);
216 	nvme_stop_ctrl(ctrl);
217 	nvme_remove_namespaces(ctrl);
218 	ctrl->ops->delete_ctrl(ctrl);
219 	nvme_uninit_ctrl(ctrl);
220 }
221 
222 static void nvme_delete_ctrl_work(struct work_struct *work)
223 {
224 	struct nvme_ctrl *ctrl =
225 		container_of(work, struct nvme_ctrl, delete_work);
226 
227 	nvme_do_delete_ctrl(ctrl);
228 }
229 
230 int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
231 {
232 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
233 		return -EBUSY;
234 	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
235 		return -EBUSY;
236 	return 0;
237 }
238 EXPORT_SYMBOL_GPL(nvme_delete_ctrl);
239 
240 void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
241 {
242 	/*
243 	 * Keep a reference until nvme_do_delete_ctrl() completes,
244 	 * since ->delete_ctrl can free the controller.
245 	 */
246 	nvme_get_ctrl(ctrl);
247 	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
248 		nvme_do_delete_ctrl(ctrl);
249 	nvme_put_ctrl(ctrl);
250 }
251 
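/*
 * Translate an NVMe command status (status code and status code type, ignoring
 * the CRD, MORE and DNR bits) into a block layer status.
 */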
252 static blk_status_t nvme_error_status(u16 status)
253 {
254 	switch (status & 0x7ff) {
255 	case NVME_SC_SUCCESS:
256 		return BLK_STS_OK;
257 	case NVME_SC_CAP_EXCEEDED:
258 		return BLK_STS_NOSPC;
259 	case NVME_SC_LBA_RANGE:
260 	case NVME_SC_CMD_INTERRUPTED:
261 	case NVME_SC_NS_NOT_READY:
262 		return BLK_STS_TARGET;
263 	case NVME_SC_BAD_ATTRIBUTES:
264 	case NVME_SC_ONCS_NOT_SUPPORTED:
265 	case NVME_SC_INVALID_OPCODE:
266 	case NVME_SC_INVALID_FIELD:
267 	case NVME_SC_INVALID_NS:
268 		return BLK_STS_NOTSUPP;
269 	case NVME_SC_WRITE_FAULT:
270 	case NVME_SC_READ_ERROR:
271 	case NVME_SC_UNWRITTEN_BLOCK:
272 	case NVME_SC_ACCESS_DENIED:
273 	case NVME_SC_READ_ONLY:
274 	case NVME_SC_COMPARE_FAILED:
275 		return BLK_STS_MEDIUM;
276 	case NVME_SC_GUARD_CHECK:
277 	case NVME_SC_APPTAG_CHECK:
278 	case NVME_SC_REFTAG_CHECK:
279 	case NVME_SC_INVALID_PI:
280 		return BLK_STS_PROTECTION;
281 	case NVME_SC_RESERVATION_CONFLICT:
282 		return BLK_STS_RESV_CONFLICT;
283 	case NVME_SC_HOST_PATH_ERROR:
284 		return BLK_STS_TRANSPORT;
285 	case NVME_SC_ZONE_TOO_MANY_ACTIVE:
286 		return BLK_STS_ZONE_ACTIVE_RESOURCE;
287 	case NVME_SC_ZONE_TOO_MANY_OPEN:
288 		return BLK_STS_ZONE_OPEN_RESOURCE;
289 	default:
290 		return BLK_STS_IOERR;
291 	}
292 }
293 
294 static void nvme_retry_req(struct request *req)
295 {
296 	unsigned long delay = 0;
297 	u16 crd;
298 
299 	/* The mask and shift result must be <= 3 */
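	/* ctrl->crdt[] holds the Command Retry Delay Times in 100 ms units */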
300 	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
301 	if (crd)
302 		delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;
303 
304 	nvme_req(req)->retries++;
305 	blk_mq_requeue_request(req, false);
306 	blk_mq_delay_kick_requeue_list(req->q, delay);
307 }
308 
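/*
 * Emit a ratelimited, human readable description of a failed command: the
 * opcode and decoded status fields, plus the LBA range for I/O commands.
 */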
309 static void nvme_log_error(struct request *req)
310 {
311 	struct nvme_ns *ns = req->q->queuedata;
312 	struct nvme_request *nr = nvme_req(req);
313 
314 	if (ns) {
315 		pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %llu blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
316 		       ns->disk ? ns->disk->disk_name : "?",
317 		       nvme_get_opcode_str(nr->cmd->common.opcode),
318 		       nr->cmd->common.opcode,
319 		       (unsigned long long)nvme_sect_to_lba(ns, blk_rq_pos(req)),
320 		       (unsigned long long)blk_rq_bytes(req) >> ns->lba_shift,
321 		       nvme_get_error_status_str(nr->status),
322 		       nr->status >> 8 & 7,	/* Status Code Type */
323 		       nr->status & 0xff,	/* Status Code */
324 		       nr->status & NVME_SC_MORE ? "MORE " : "",
325 		       nr->status & NVME_SC_DNR  ? "DNR "  : "");
326 		return;
327 	}
328 
329 	pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s\n",
330 			   dev_name(nr->ctrl->device),
331 			   nvme_get_admin_opcode_str(nr->cmd->common.opcode),
332 			   nr->cmd->common.opcode,
333 			   nvme_get_error_status_str(nr->status),
334 			   nr->status >> 8 & 7,	/* Status Code Type */
335 			   nr->status & 0xff,	/* Status Code */
336 			   nr->status & NVME_SC_MORE ? "MORE " : "",
337 			   nr->status & NVME_SC_DNR  ? "DNR "  : "");
338 }
339 
340 enum nvme_disposition {
341 	COMPLETE,
342 	RETRY,
343 	FAILOVER,
344 	AUTHENTICATE,
345 };
346 
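/*
 * Decide how a completed request is handled: complete it as is, retry it on
 * the same path, fail it over to another path (multipath only), or trigger
 * re-authentication first.
 */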
347 static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
348 {
349 	if (likely(nvme_req(req)->status == 0))
350 		return COMPLETE;
351 
352 	if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED)
353 		return AUTHENTICATE;
354 
355 	if (blk_noretry_request(req) ||
356 	    (nvme_req(req)->status & NVME_SC_DNR) ||
357 	    nvme_req(req)->retries >= nvme_max_retries)
358 		return COMPLETE;
359 
360 	if (req->cmd_flags & REQ_NVME_MPATH) {
361 		if (nvme_is_path_error(nvme_req(req)->status) ||
362 		    blk_queue_dying(req->q))
363 			return FAILOVER;
364 	} else {
365 		if (blk_queue_dying(req->q))
366 			return COMPLETE;
367 	}
368 
369 	return RETRY;
370 }
371 
372 static inline void nvme_end_req_zoned(struct request *req)
373 {
374 	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
375 	    req_op(req) == REQ_OP_ZONE_APPEND)
376 		req->__sector = nvme_lba_to_sect(req->q->queuedata,
377 			le64_to_cpu(nvme_req(req)->result.u64));
378 }
379 
380 void nvme_end_req(struct request *req)
381 {
382 	blk_status_t status = nvme_error_status(nvme_req(req)->status);
383 
384 	if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET)))
385 		nvme_log_error(req);
386 	nvme_end_req_zoned(req);
387 	nvme_trace_bio_complete(req);
388 	if (req->cmd_flags & REQ_NVME_MPATH)
389 		nvme_mpath_end_request(req);
390 	blk_mq_end_request(req, status);
391 }
392 
393 void nvme_complete_rq(struct request *req)
394 {
395 	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
396 
397 	trace_nvme_complete_rq(req);
398 	nvme_cleanup_cmd(req);
399 
400 	/*
401 	 * Completions of long-running commands should not be able to
402 	 * defer sending of periodic keep alives, since the controller
403 	 * may have completed processing such commands a long time ago
404 	 * (arbitrarily close to command submission time).
405 	 * req->deadline - req->timeout is the command submission time
406 	 * in jiffies.
407 	 */
408 	if (ctrl->kas &&
409 	    req->deadline - req->timeout >= ctrl->ka_last_check_time)
410 		ctrl->comp_seen = true;
411 
412 	switch (nvme_decide_disposition(req)) {
413 	case COMPLETE:
414 		nvme_end_req(req);
415 		return;
416 	case RETRY:
417 		nvme_retry_req(req);
418 		return;
419 	case FAILOVER:
420 		nvme_failover_req(req);
421 		return;
422 	case AUTHENTICATE:
423 #ifdef CONFIG_NVME_AUTH
424 		queue_work(nvme_wq, &ctrl->dhchap_auth_work);
425 		nvme_retry_req(req);
426 #else
427 		nvme_end_req(req);
428 #endif
429 		return;
430 	}
431 }
432 EXPORT_SYMBOL_GPL(nvme_complete_rq);
433 
434 void nvme_complete_batch_req(struct request *req)
435 {
436 	trace_nvme_complete_rq(req);
437 	nvme_cleanup_cmd(req);
438 	nvme_end_req_zoned(req);
439 }
440 EXPORT_SYMBOL_GPL(nvme_complete_batch_req);
441 
442 /*
443  * Called to unwind from ->queue_rq on a failed command submission so that the
444  * multipathing code gets called to potentially failover to another path.
445  * The caller needs to unwind all transport specific resource allocations and
446  * must propagate the return value.
447  */
448 blk_status_t nvme_host_path_error(struct request *req)
449 {
450 	nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
451 	blk_mq_set_request_complete(req);
452 	nvme_complete_rq(req);
453 	return BLK_STS_OK;
454 }
455 EXPORT_SYMBOL_GPL(nvme_host_path_error);
456 
457 bool nvme_cancel_request(struct request *req, void *data)
458 {
459 	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
460 				"Cancelling I/O %d", req->tag);
461 
462 	/* don't abort a completed or idle request */
463 	if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT)
464 		return true;
465 
466 	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
467 	nvme_req(req)->flags |= NVME_REQ_CANCELLED;
468 	blk_mq_complete_request(req);
469 	return true;
470 }
471 EXPORT_SYMBOL_GPL(nvme_cancel_request);
472 
473 void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
474 {
475 	if (ctrl->tagset) {
476 		blk_mq_tagset_busy_iter(ctrl->tagset,
477 				nvme_cancel_request, ctrl);
478 		blk_mq_tagset_wait_completed_request(ctrl->tagset);
479 	}
480 }
481 EXPORT_SYMBOL_GPL(nvme_cancel_tagset);
482 
483 void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
484 {
485 	if (ctrl->admin_tagset) {
486 		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
487 				nvme_cancel_request, ctrl);
488 		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
489 	}
490 }
491 EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);
492 
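/*
 * Controller state machine.  Returns true if the requested transition is
 * legal and has been applied:
 *
 *   NEW        -> LIVE, RESETTING, CONNECTING
 *   LIVE       -> RESETTING, DELETING
 *   RESETTING  -> LIVE, CONNECTING, DELETING
 *   CONNECTING -> LIVE, DELETING
 *   DELETING   -> DELETING_NOIO, DEAD
 *   DEAD       -> DELETING_NOIO
 */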
493 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
494 		enum nvme_ctrl_state new_state)
495 {
496 	enum nvme_ctrl_state old_state;
497 	unsigned long flags;
498 	bool changed = false;
499 
500 	spin_lock_irqsave(&ctrl->lock, flags);
501 
502 	old_state = nvme_ctrl_state(ctrl);
503 	switch (new_state) {
504 	case NVME_CTRL_LIVE:
505 		switch (old_state) {
506 		case NVME_CTRL_NEW:
507 		case NVME_CTRL_RESETTING:
508 		case NVME_CTRL_CONNECTING:
509 			changed = true;
510 			fallthrough;
511 		default:
512 			break;
513 		}
514 		break;
515 	case NVME_CTRL_RESETTING:
516 		switch (old_state) {
517 		case NVME_CTRL_NEW:
518 		case NVME_CTRL_LIVE:
519 			changed = true;
520 			fallthrough;
521 		default:
522 			break;
523 		}
524 		break;
525 	case NVME_CTRL_CONNECTING:
526 		switch (old_state) {
527 		case NVME_CTRL_NEW:
528 		case NVME_CTRL_RESETTING:
529 			changed = true;
530 			fallthrough;
531 		default:
532 			break;
533 		}
534 		break;
535 	case NVME_CTRL_DELETING:
536 		switch (old_state) {
537 		case NVME_CTRL_LIVE:
538 		case NVME_CTRL_RESETTING:
539 		case NVME_CTRL_CONNECTING:
540 			changed = true;
541 			fallthrough;
542 		default:
543 			break;
544 		}
545 		break;
546 	case NVME_CTRL_DELETING_NOIO:
547 		switch (old_state) {
548 		case NVME_CTRL_DELETING:
549 		case NVME_CTRL_DEAD:
550 			changed = true;
551 			fallthrough;
552 		default:
553 			break;
554 		}
555 		break;
556 	case NVME_CTRL_DEAD:
557 		switch (old_state) {
558 		case NVME_CTRL_DELETING:
559 			changed = true;
560 			fallthrough;
561 		default:
562 			break;
563 		}
564 		break;
565 	default:
566 		break;
567 	}
568 
569 	if (changed) {
570 		WRITE_ONCE(ctrl->state, new_state);
571 		wake_up_all(&ctrl->state_wq);
572 	}
573 
574 	spin_unlock_irqrestore(&ctrl->lock, flags);
575 	if (!changed)
576 		return false;
577 
578 	if (new_state == NVME_CTRL_LIVE) {
579 		if (old_state == NVME_CTRL_CONNECTING)
580 			nvme_stop_failfast_work(ctrl);
581 		nvme_kick_requeue_lists(ctrl);
582 	} else if (new_state == NVME_CTRL_CONNECTING &&
583 		old_state == NVME_CTRL_RESETTING) {
584 		nvme_start_failfast_work(ctrl);
585 	}
586 	return changed;
587 }
588 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
589 
590 /*
591  * Waits for the controller state to be resetting, or returns false if it is
592  * not possible to ever transition to that state.
593  */
594 bool nvme_wait_reset(struct nvme_ctrl *ctrl)
595 {
596 	wait_event(ctrl->state_wq,
597 		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
598 		   nvme_state_terminal(ctrl));
599 	return nvme_ctrl_state(ctrl) == NVME_CTRL_RESETTING;
600 }
601 EXPORT_SYMBOL_GPL(nvme_wait_reset);
602 
603 static void nvme_free_ns_head(struct kref *ref)
604 {
605 	struct nvme_ns_head *head =
606 		container_of(ref, struct nvme_ns_head, ref);
607 
608 	nvme_mpath_remove_disk(head);
609 	ida_free(&head->subsys->ns_ida, head->instance);
610 	cleanup_srcu_struct(&head->srcu);
611 	nvme_put_subsystem(head->subsys);
612 	kfree(head);
613 }
614 
615 bool nvme_tryget_ns_head(struct nvme_ns_head *head)
616 {
617 	return kref_get_unless_zero(&head->ref);
618 }
619 
620 void nvme_put_ns_head(struct nvme_ns_head *head)
621 {
622 	kref_put(&head->ref, nvme_free_ns_head);
623 }
624 
625 static void nvme_free_ns(struct kref *kref)
626 {
627 	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
628 
629 	put_disk(ns->disk);
630 	nvme_put_ns_head(ns->head);
631 	nvme_put_ctrl(ns->ctrl);
632 	kfree(ns);
633 }
634 
635 static inline bool nvme_get_ns(struct nvme_ns *ns)
636 {
637 	return kref_get_unless_zero(&ns->kref);
638 }
639 
640 void nvme_put_ns(struct nvme_ns *ns)
641 {
642 	kref_put(&ns->kref, nvme_free_ns);
643 }
644 EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
645 
646 static inline void nvme_clear_nvme_request(struct request *req)
647 {
648 	nvme_req(req)->status = 0;
649 	nvme_req(req)->retries = 0;
650 	nvme_req(req)->flags = 0;
651 	req->rq_flags |= RQF_DONTPREP;
652 }
653 
654 /* initialize a passthrough request */
655 void nvme_init_request(struct request *req, struct nvme_command *cmd)
656 {
657 	if (req->q->queuedata)
658 		req->timeout = NVME_IO_TIMEOUT;
659 	else /* no queuedata implies admin queue */
660 		req->timeout = NVME_ADMIN_TIMEOUT;
661 
662 	/* passthru commands should let the driver set the SGL flags */
663 	cmd->common.flags &= ~NVME_CMD_SGL_ALL;
664 
665 	req->cmd_flags |= REQ_FAILFAST_DRIVER;
666 	if (req->mq_hctx->type == HCTX_TYPE_POLL)
667 		req->cmd_flags |= REQ_POLLED;
668 	nvme_clear_nvme_request(req);
669 	req->rq_flags |= RQF_QUIET;
670 	memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
671 }
672 EXPORT_SYMBOL_GPL(nvme_init_request);
673 
674 /*
675  * For a command we're not in a state to send to the device, the default
676  * action is to busy it and retry it after the controller state is recovered.
677  * However, if the controller is deleting, or if anything is marked for
678  * failfast or nvme multipath, it is failed immediately.
679  *
680  * Note: commands used to initialize the controller will be marked for failfast.
681  * Note: nvme cli/ioctl commands are marked for failfast.
682  */
683 blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
684 		struct request *rq)
685 {
686 	enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
687 
688 	if (state != NVME_CTRL_DELETING_NOIO &&
689 	    state != NVME_CTRL_DELETING &&
690 	    state != NVME_CTRL_DEAD &&
691 	    !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
692 	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
693 		return BLK_STS_RESOURCE;
694 	return nvme_host_path_error(rq);
695 }
696 EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
697 
698 bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
699 		bool queue_live)
700 {
701 	struct nvme_request *req = nvme_req(rq);
702 
703 	/*
704 	 * Currently we have a problem sending passthru commands
705 	 * on the admin_q if the controller is not LIVE, because we can't
706 	 * make sure that they are going out after the admin connect,
707 	 * controller enable and/or other commands in the initialization
708 	 * sequence.  Until the controller is LIVE, fail with
709 	 * BLK_STS_RESOURCE so that they will be rescheduled.
710 	 */
711 	if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
712 		return false;
713 
714 	if (ctrl->ops->flags & NVME_F_FABRICS) {
715 		/*
716 		 * Only allow commands on a live queue, except for the connect
717 		 * command, which is required to set the queue live in the
718 		 * appropriate states.
719 		 */
720 		switch (nvme_ctrl_state(ctrl)) {
721 		case NVME_CTRL_CONNECTING:
722 			if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
723 			    (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
724 			     req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send ||
725 			     req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive))
726 				return true;
727 			break;
728 		default:
729 			break;
730 		case NVME_CTRL_DEAD:
731 			return false;
732 		}
733 	}
734 
735 	return queue_live;
736 }
737 EXPORT_SYMBOL_GPL(__nvme_check_ready);
738 
739 static inline void nvme_setup_flush(struct nvme_ns *ns,
740 		struct nvme_command *cmnd)
741 {
742 	memset(cmnd, 0, sizeof(*cmnd));
743 	cmnd->common.opcode = nvme_cmd_flush;
744 	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
745 }
746 
747 static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
748 		struct nvme_command *cmnd)
749 {
750 	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
751 	struct nvme_dsm_range *range;
752 	struct bio *bio;
753 
754 	/*
755 	 * Some devices do not consider the DSM 'Number of Ranges' field when
756 	 * determining how much data to DMA.  Always allocate memory for the max
757 	 * number of segments to prevent the device reading beyond the buffer end.
758 	 */
759 	static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;
760 
761 	range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
762 	if (!range) {
763 		/*
764 		 * If we fail to allocate our range, fall back to the controller
765 		 * discard page. If that's also busy, it's safe to return
766 		 * busy, as we know we can make progress once that's freed.
767 		 */
768 		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
769 			return BLK_STS_RESOURCE;
770 
771 		range = page_address(ns->ctrl->discard_page);
772 	}
773 
774 	if (queue_max_discard_segments(req->q) == 1) {
775 		u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req));
776 		u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9);
777 
778 		range[0].cattr = cpu_to_le32(0);
779 		range[0].nlb = cpu_to_le32(nlb);
780 		range[0].slba = cpu_to_le64(slba);
781 		n = 1;
782 	} else {
783 		__rq_for_each_bio(bio, req) {
784 			u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
785 			u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
786 
787 			if (n < segments) {
788 				range[n].cattr = cpu_to_le32(0);
789 				range[n].nlb = cpu_to_le32(nlb);
790 				range[n].slba = cpu_to_le64(slba);
791 			}
792 			n++;
793 		}
794 	}
795 
796 	if (WARN_ON_ONCE(n != segments)) {
797 		if (virt_to_page(range) == ns->ctrl->discard_page)
798 			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
799 		else
800 			kfree(range);
801 		return BLK_STS_IOERR;
802 	}
803 
804 	memset(cmnd, 0, sizeof(*cmnd));
805 	cmnd->dsm.opcode = nvme_cmd_dsm;
806 	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
807 	cmnd->dsm.nr = cpu_to_le32(segments - 1);
808 	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
809 
810 	bvec_set_virt(&req->special_vec, range, alloc_size);
811 	req->rq_flags |= RQF_SPECIAL_PAYLOAD;
812 
813 	return BLK_STS_OK;
814 }
815 
816 static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
817 			      struct request *req)
818 {
819 	u32 upper, lower;
820 	u64 ref48;
821 
822 	/* both rw and write zeroes share the same reftag format */
823 	switch (ns->guard_type) {
824 	case NVME_NVM_NS_16B_GUARD:
825 		cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
826 		break;
827 	case NVME_NVM_NS_64B_GUARD:
828 		ref48 = ext_pi_ref_tag(req);
829 		lower = lower_32_bits(ref48);
830 		upper = upper_32_bits(ref48);
831 
832 		cmnd->rw.reftag = cpu_to_le32(lower);
833 		cmnd->rw.cdw3 = cpu_to_le32(upper);
834 		break;
835 	default:
836 		break;
837 	}
838 }
839 
840 static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
841 		struct request *req, struct nvme_command *cmnd)
842 {
843 	memset(cmnd, 0, sizeof(*cmnd));
844 
845 	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
846 		return nvme_setup_discard(ns, req, cmnd);
847 
848 	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
849 	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
850 	cmnd->write_zeroes.slba =
851 		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
852 	cmnd->write_zeroes.length =
853 		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
854 
855 	if (!(req->cmd_flags & REQ_NOUNMAP) && (ns->features & NVME_NS_DEAC))
856 		cmnd->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC);
857 
858 	if (nvme_ns_has_pi(ns)) {
859 		cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT);
860 
861 		switch (ns->pi_type) {
862 		case NVME_NS_DPS_PI_TYPE1:
863 		case NVME_NS_DPS_PI_TYPE2:
864 			nvme_set_ref_tag(ns, cmnd, req);
865 			break;
866 		}
867 	}
868 
869 	return BLK_STS_OK;
870 }
871 
872 static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
873 		struct request *req, struct nvme_command *cmnd,
874 		enum nvme_opcode op)
875 {
876 	u16 control = 0;
877 	u32 dsmgmt = 0;
878 
879 	if (req->cmd_flags & REQ_FUA)
880 		control |= NVME_RW_FUA;
881 	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
882 		control |= NVME_RW_LR;
883 
884 	if (req->cmd_flags & REQ_RAHEAD)
885 		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
886 
887 	cmnd->rw.opcode = op;
888 	cmnd->rw.flags = 0;
889 	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
890 	cmnd->rw.cdw2 = 0;
891 	cmnd->rw.cdw3 = 0;
892 	cmnd->rw.metadata = 0;
893 	cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
894 	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
895 	cmnd->rw.reftag = 0;
896 	cmnd->rw.apptag = 0;
897 	cmnd->rw.appmask = 0;
898 
899 	if (ns->ms) {
900 		/*
901 		 * If formatted with metadata, the block layer always provides a
902 		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.  Else
903 		 * we enable the PRACT bit for protection information or set the
904 		 * namespace capacity to zero to prevent any I/O.
905 		 */
906 		if (!blk_integrity_rq(req)) {
907 			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
908 				return BLK_STS_NOTSUPP;
909 			control |= NVME_RW_PRINFO_PRACT;
910 		}
911 
912 		switch (ns->pi_type) {
913 		case NVME_NS_DPS_PI_TYPE3:
914 			control |= NVME_RW_PRINFO_PRCHK_GUARD;
915 			break;
916 		case NVME_NS_DPS_PI_TYPE1:
917 		case NVME_NS_DPS_PI_TYPE2:
918 			control |= NVME_RW_PRINFO_PRCHK_GUARD |
919 					NVME_RW_PRINFO_PRCHK_REF;
920 			if (op == nvme_cmd_zone_append)
921 				control |= NVME_RW_APPEND_PIREMAP;
922 			nvme_set_ref_tag(ns, cmnd, req);
923 			break;
924 		}
925 	}
926 
927 	cmnd->rw.control = cpu_to_le16(control);
928 	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
929 	return BLK_STS_OK;
930 }
931 
932 void nvme_cleanup_cmd(struct request *req)
933 {
934 	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
935 		struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
936 
937 		if (req->special_vec.bv_page == ctrl->discard_page)
938 			clear_bit_unlock(0, &ctrl->discard_page_busy);
939 		else
940 			kfree(bvec_virt(&req->special_vec));
941 		req->rq_flags &= ~RQF_SPECIAL_PAYLOAD;
942 	}
943 }
944 EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
945 
946 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
947 {
948 	struct nvme_command *cmd = nvme_req(req)->cmd;
949 	blk_status_t ret = BLK_STS_OK;
950 
951 	if (!(req->rq_flags & RQF_DONTPREP))
952 		nvme_clear_nvme_request(req);
953 
954 	switch (req_op(req)) {
955 	case REQ_OP_DRV_IN:
956 	case REQ_OP_DRV_OUT:
957 		/* these are setup prior to execution in nvme_init_request() */
958 		break;
959 	case REQ_OP_FLUSH:
960 		nvme_setup_flush(ns, cmd);
961 		break;
962 	case REQ_OP_ZONE_RESET_ALL:
963 	case REQ_OP_ZONE_RESET:
964 		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
965 		break;
966 	case REQ_OP_ZONE_OPEN:
967 		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
968 		break;
969 	case REQ_OP_ZONE_CLOSE:
970 		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
971 		break;
972 	case REQ_OP_ZONE_FINISH:
973 		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
974 		break;
975 	case REQ_OP_WRITE_ZEROES:
976 		ret = nvme_setup_write_zeroes(ns, req, cmd);
977 		break;
978 	case REQ_OP_DISCARD:
979 		ret = nvme_setup_discard(ns, req, cmd);
980 		break;
981 	case REQ_OP_READ:
982 		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
983 		break;
984 	case REQ_OP_WRITE:
985 		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
986 		break;
987 	case REQ_OP_ZONE_APPEND:
988 		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
989 		break;
990 	default:
991 		WARN_ON_ONCE(1);
992 		return BLK_STS_IOERR;
993 	}
994 
995 	cmd->common.command_id = nvme_cid(req);
996 	trace_nvme_setup_cmd(req, cmd);
997 	return ret;
998 }
999 EXPORT_SYMBOL_GPL(nvme_setup_cmd);
1000 
1001 /*
1002  * Return values:
1003  * 0:  success
1004  * >0: nvme controller's cqe status response
1005  * <0: kernel error in lieu of controller response
1006  */
1007 int nvme_execute_rq(struct request *rq, bool at_head)
1008 {
1009 	blk_status_t status;
1010 
1011 	status = blk_execute_rq(rq, at_head);
1012 	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
1013 		return -EINTR;
1014 	if (nvme_req(rq)->status)
1015 		return nvme_req(rq)->status;
1016 	return blk_status_to_errno(status);
1017 }
1018 EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU);
1019 
1020 /*
1021  * Returns 0 on success.  If the result is negative, it's a Linux error code;
1022  * if the result is positive, it's an NVM Express status code
1023  */
1024 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
1025 		union nvme_result *result, void *buffer, unsigned bufflen,
1026 		int qid, int at_head, blk_mq_req_flags_t flags)
1027 {
1028 	struct request *req;
1029 	int ret;
1030 
1031 	if (qid == NVME_QID_ANY)
1032 		req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
1033 	else
1034 		req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
1035 						qid - 1);
1036 
1037 	if (IS_ERR(req))
1038 		return PTR_ERR(req);
1039 	nvme_init_request(req, cmd);
1040 
1041 	if (buffer && bufflen) {
1042 		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
1043 		if (ret)
1044 			goto out;
1045 	}
1046 
1047 	ret = nvme_execute_rq(req, at_head);
1048 	if (result && ret >= 0)
1049 		*result = nvme_req(req)->result;
1050  out:
1051 	blk_mq_free_request(req);
1052 	return ret;
1053 }
1054 EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
1055 
1056 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
1057 		void *buffer, unsigned bufflen)
1058 {
1059 	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
1060 			NVME_QID_ANY, 0, 0);
1061 }
1062 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
1063 
1064 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
1065 {
1066 	u32 effects = 0;
1067 
1068 	if (ns) {
1069 		effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
1070 		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
1071 			dev_warn_once(ctrl->device,
1072 				"IO command:%02x has unusual effects:%08x\n",
1073 				opcode, effects);
1074 
1075 		/*
1076 		 * NVME_CMD_EFFECTS_CSE_MASK causes a freeze of all I/O queues,
1077 		 * which would deadlock when done on an I/O command.  Note that
1078 		 * we already warn about an unusual effect above.
1079 		 */
1080 		effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
1081 	} else {
1082 		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
1083 	}
1084 
1085 	return effects;
1086 }
1087 EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);
1088 
1089 u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
1090 {
1091 	u32 effects = nvme_command_effects(ctrl, ns, opcode);
1092 
1093 	/*
1094 	 * For simplicity, IO to all namespaces is quiesced even if the command
1095 	 * effects say only one namespace is affected.
1096 	 */
1097 	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
1098 		mutex_lock(&ctrl->scan_lock);
1099 		mutex_lock(&ctrl->subsys->lock);
1100 		nvme_mpath_start_freeze(ctrl->subsys);
1101 		nvme_mpath_wait_freeze(ctrl->subsys);
1102 		nvme_start_freeze(ctrl);
1103 		nvme_wait_freeze(ctrl);
1104 	}
1105 	return effects;
1106 }
1107 EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, NVME_TARGET_PASSTHRU);
1108 
1109 void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
1110 		       struct nvme_command *cmd, int status)
1111 {
1112 	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
1113 		nvme_unfreeze(ctrl);
1114 		nvme_mpath_unfreeze(ctrl->subsys);
1115 		mutex_unlock(&ctrl->subsys->lock);
1116 		mutex_unlock(&ctrl->scan_lock);
1117 	}
1118 	if (effects & NVME_CMD_EFFECTS_CCC) {
1119 		if (!test_and_set_bit(NVME_CTRL_DIRTY_CAPABILITY,
1120 				      &ctrl->flags)) {
1121 			dev_info(ctrl->device,
1122 "controller capabilities changed, reset may be required to take effect.\n");
1123 		}
1124 	}
1125 	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
1126 		nvme_queue_scan(ctrl);
1127 		flush_work(&ctrl->scan_work);
1128 	}
1129 	if (ns)
1130 		return;
1131 
1132 	switch (cmd->common.opcode) {
1133 	case nvme_admin_set_features:
1134 		switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) {
1135 		case NVME_FEAT_KATO:
1136 			/*
1137 			 * The keep alive command interval on the host should be
1138 			 * updated when KATO is modified by a Set Features
1139 			 * command.
1140 			 */
1141 			if (!status)
1142 				nvme_update_keep_alive(ctrl, cmd);
1143 			break;
1144 		default:
1145 			break;
1146 		}
1147 		break;
1148 	default:
1149 		break;
1150 	}
1151 }
1152 EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);
1153 
1154 /*
1155  * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
1156  *
1157  *   The host should send Keep Alive commands at half of the Keep Alive Timeout
1158  *   accounting for transport roundtrip times [..].
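 *
 *   For example, a KATO of 10 seconds results in the keep-alive work running
 *   every 5 seconds, or every 2.5 seconds when Traffic Based Keep Alive is in
 *   use (see below).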
1159  */
1160 static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
1161 {
1162 	unsigned long delay = ctrl->kato * HZ / 2;
1163 
1164 	/*
1165 	 * When using Traffic Based Keep Alive, we need to run
1166 	 * nvme_keep_alive_work at twice the normal frequency, as one
1167 	 * command completion can postpone sending a keep alive command
1168 	 * by up to twice the delay between runs.
1169 	 */
1170 	if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS)
1171 		delay /= 2;
1172 	return delay;
1173 }
1174 
1175 static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
1176 {
1177 	queue_delayed_work(nvme_wq, &ctrl->ka_work,
1178 			   nvme_keep_alive_work_period(ctrl));
1179 }
1180 
1181 static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
1182 						 blk_status_t status)
1183 {
1184 	struct nvme_ctrl *ctrl = rq->end_io_data;
1185 	unsigned long flags;
1186 	bool startka = false;
1187 	unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
1188 	unsigned long delay = nvme_keep_alive_work_period(ctrl);
1189 
1190 	/*
1191 	 * Subtract off the keepalive RTT so nvme_keep_alive_work runs
1192 	 * at the desired frequency.
1193 	 */
1194 	if (rtt <= delay) {
1195 		delay -= rtt;
1196 	} else {
1197 		dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n",
1198 			 jiffies_to_msecs(rtt));
1199 		delay = 0;
1200 	}
1201 
1202 	blk_mq_free_request(rq);
1203 
1204 	if (status) {
1205 		dev_err(ctrl->device,
1206 			"failed nvme_keep_alive_end_io error=%d\n",
1207 				status);
1208 		return RQ_END_IO_NONE;
1209 	}
1210 
1211 	ctrl->ka_last_check_time = jiffies;
1212 	ctrl->comp_seen = false;
1213 	spin_lock_irqsave(&ctrl->lock, flags);
1214 	if (ctrl->state == NVME_CTRL_LIVE ||
1215 	    ctrl->state == NVME_CTRL_CONNECTING)
1216 		startka = true;
1217 	spin_unlock_irqrestore(&ctrl->lock, flags);
1218 	if (startka)
1219 		queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
1220 	return RQ_END_IO_NONE;
1221 }
1222 
1223 static void nvme_keep_alive_work(struct work_struct *work)
1224 {
1225 	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
1226 			struct nvme_ctrl, ka_work);
1227 	bool comp_seen = ctrl->comp_seen;
1228 	struct request *rq;
1229 
1230 	ctrl->ka_last_check_time = jiffies;
1231 
1232 	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
1233 		dev_dbg(ctrl->device,
1234 			"reschedule traffic based keep-alive timer\n");
1235 		ctrl->comp_seen = false;
1236 		nvme_queue_keep_alive_work(ctrl);
1237 		return;
1238 	}
1239 
1240 	rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd),
1241 				  BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
1242 	if (IS_ERR(rq)) {
1243 		/* allocation failure, reset the controller */
1244 		dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
1245 		nvme_reset_ctrl(ctrl);
1246 		return;
1247 	}
1248 	nvme_init_request(rq, &ctrl->ka_cmd);
1249 
1250 	rq->timeout = ctrl->kato * HZ;
1251 	rq->end_io = nvme_keep_alive_end_io;
1252 	rq->end_io_data = ctrl;
1253 	blk_execute_rq_nowait(rq, false);
1254 }
1255 
1256 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
1257 {
1258 	if (unlikely(ctrl->kato == 0))
1259 		return;
1260 
1261 	nvme_queue_keep_alive_work(ctrl);
1262 }
1263 
1264 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
1265 {
1266 	if (unlikely(ctrl->kato == 0))
1267 		return;
1268 
1269 	cancel_delayed_work_sync(&ctrl->ka_work);
1270 }
1271 EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
1272 
1273 static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
1274 				   struct nvme_command *cmd)
1275 {
1276 	unsigned int new_kato =
1277 		DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000);
1278 
1279 	dev_info(ctrl->device,
1280 		 "keep alive interval updated from %u ms to %u ms\n",
1281 		 ctrl->kato * 1000 / 2, new_kato * 1000 / 2);
1282 
1283 	nvme_stop_keep_alive(ctrl);
1284 	ctrl->kato = new_kato;
1285 	nvme_start_keep_alive(ctrl);
1286 }
1287 
1288 /*
1289  * In NVMe 1.0 the CNS field was just a binary controller or namespace
1290  * flag, thus sending any new CNS opcodes has a big chance of not working.
1291  * flag, so sending any newer CNS values has a big chance of not working.
1292  * Qemu unfortunately had that bug even after reporting 1.1 version compliance
1293  */
1294 static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
1295 {
1296 	if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
1297 		return ctrl->vs < NVME_VS(1, 2, 0);
1298 	return ctrl->vs < NVME_VS(1, 1, 0);
1299 }
1300 
1301 static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
1302 {
1303 	struct nvme_command c = { };
1304 	int error;
1305 
1306 	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1307 	c.identify.opcode = nvme_admin_identify;
1308 	c.identify.cns = NVME_ID_CNS_CTRL;
1309 
1310 	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
1311 	if (!*id)
1312 		return -ENOMEM;
1313 
1314 	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
1315 			sizeof(struct nvme_id_ctrl));
1316 	if (error)
1317 		kfree(*id);
1318 	return error;
1319 }
1320 
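/*
 * Parse a single Namespace Identification Descriptor and copy the identifier
 * it carries into @ids.  Returns the descriptor payload length, or -1 if the
 * controller reported a bogus length.
 */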
1321 static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
1322 		struct nvme_ns_id_desc *cur, bool *csi_seen)
1323 {
1324 	const char *warn_str = "ctrl returned bogus length:";
1325 	void *data = cur;
1326 
1327 	switch (cur->nidt) {
1328 	case NVME_NIDT_EUI64:
1329 		if (cur->nidl != NVME_NIDT_EUI64_LEN) {
1330 			dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
1331 				 warn_str, cur->nidl);
1332 			return -1;
1333 		}
1334 		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
1335 			return NVME_NIDT_EUI64_LEN;
1336 		memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
1337 		return NVME_NIDT_EUI64_LEN;
1338 	case NVME_NIDT_NGUID:
1339 		if (cur->nidl != NVME_NIDT_NGUID_LEN) {
1340 			dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
1341 				 warn_str, cur->nidl);
1342 			return -1;
1343 		}
1344 		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
1345 			return NVME_NIDT_NGUID_LEN;
1346 		memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
1347 		return NVME_NIDT_NGUID_LEN;
1348 	case NVME_NIDT_UUID:
1349 		if (cur->nidl != NVME_NIDT_UUID_LEN) {
1350 			dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
1351 				 warn_str, cur->nidl);
1352 			return -1;
1353 		}
1354 		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
1355 			return NVME_NIDT_UUID_LEN;
1356 		uuid_copy(&ids->uuid, data + sizeof(*cur));
1357 		return NVME_NIDT_UUID_LEN;
1358 	case NVME_NIDT_CSI:
1359 		if (cur->nidl != NVME_NIDT_CSI_LEN) {
1360 			dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
1361 				 warn_str, cur->nidl);
1362 			return -1;
1363 		}
1364 		memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
1365 		*csi_seen = true;
1366 		return NVME_NIDT_CSI_LEN;
1367 	default:
1368 		/* Skip unknown types */
1369 		return cur->nidl;
1370 	}
1371 }
1372 
1373 static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl,
1374 		struct nvme_ns_info *info)
1375 {
1376 	struct nvme_command c = { };
1377 	bool csi_seen = false;
1378 	int status, pos, len;
1379 	void *data;
1380 
1381 	if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
1382 		return 0;
1383 	if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
1384 		return 0;
1385 
1386 	c.identify.opcode = nvme_admin_identify;
1387 	c.identify.nsid = cpu_to_le32(info->nsid);
1388 	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
1389 
1390 	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
1391 	if (!data)
1392 		return -ENOMEM;
1393 
1394 	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
1395 				      NVME_IDENTIFY_DATA_SIZE);
1396 	if (status) {
1397 		dev_warn(ctrl->device,
1398 			"Identify Descriptors failed (nsid=%u, status=0x%x)\n",
1399 			info->nsid, status);
1400 		goto free_data;
1401 	}
1402 
1403 	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
1404 		struct nvme_ns_id_desc *cur = data + pos;
1405 
1406 		if (cur->nidl == 0)
1407 			break;
1408 
1409 		len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen);
1410 		if (len < 0)
1411 			break;
1412 
1413 		len += sizeof(*cur);
1414 	}
1415 
1416 	if (nvme_multi_css(ctrl) && !csi_seen) {
1417 		dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
1418 			 info->nsid);
1419 		status = -EINVAL;
1420 	}
1421 
1422 free_data:
1423 	kfree(data);
1424 	return status;
1425 }
1426 
1427 static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
1428 			struct nvme_id_ns **id)
1429 {
1430 	struct nvme_command c = { };
1431 	int error;
1432 
1433 	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1434 	c.identify.opcode = nvme_admin_identify;
1435 	c.identify.nsid = cpu_to_le32(nsid);
1436 	c.identify.cns = NVME_ID_CNS_NS;
1437 
1438 	*id = kmalloc(sizeof(**id), GFP_KERNEL);
1439 	if (!*id)
1440 		return -ENOMEM;
1441 
1442 	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
1443 	if (error) {
1444 		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
1445 		kfree(*id);
1446 	}
1447 	return error;
1448 }
1449 
1450 static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
1451 		struct nvme_ns_info *info)
1452 {
1453 	struct nvme_ns_ids *ids = &info->ids;
1454 	struct nvme_id_ns *id;
1455 	int ret;
1456 
1457 	ret = nvme_identify_ns(ctrl, info->nsid, &id);
1458 	if (ret)
1459 		return ret;
1460 
1461 	if (id->ncap == 0) {
1462 		/* namespace not allocated or attached */
1463 		info->is_removed = true;
1464 		ret = -ENODEV;
1465 		goto error;
1466 	}
1467 
1468 	info->anagrpid = id->anagrpid;
1469 	info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
1470 	info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
1471 	info->is_ready = true;
1472 	if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
1473 		dev_info(ctrl->device,
1474 			 "Ignoring bogus Namespace Identifiers\n");
1475 	} else {
1476 		if (ctrl->vs >= NVME_VS(1, 1, 0) &&
1477 		    !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
1478 			memcpy(ids->eui64, id->eui64, sizeof(ids->eui64));
1479 		if (ctrl->vs >= NVME_VS(1, 2, 0) &&
1480 		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
1481 			memcpy(ids->nguid, id->nguid, sizeof(ids->nguid));
1482 	}
1483 
1484 error:
1485 	kfree(id);
1486 	return ret;
1487 }
1488 
1489 static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
1490 		struct nvme_ns_info *info)
1491 {
1492 	struct nvme_id_ns_cs_indep *id;
1493 	struct nvme_command c = {
1494 		.identify.opcode	= nvme_admin_identify,
1495 		.identify.nsid		= cpu_to_le32(info->nsid),
1496 		.identify.cns		= NVME_ID_CNS_NS_CS_INDEP,
1497 	};
1498 	int ret;
1499 
1500 	id = kmalloc(sizeof(*id), GFP_KERNEL);
1501 	if (!id)
1502 		return -ENOMEM;
1503 
1504 	ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
1505 	if (!ret) {
1506 		info->anagrpid = id->anagrpid;
1507 		info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
1508 		info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
1509 		info->is_ready = id->nstat & NVME_NSTAT_NRDY;
1510 	}
1511 	kfree(id);
1512 	return ret;
1513 }
1514 
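/*
 * Common helper for the Get Features and Set Features admin commands.  The
 * completion queue entry's dword 0 is returned in @result if the caller asks
 * for it.
 */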
1515 static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
1516 		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
1517 {
1518 	union nvme_result res = { 0 };
1519 	struct nvme_command c = { };
1520 	int ret;
1521 
1522 	c.features.opcode = op;
1523 	c.features.fid = cpu_to_le32(fid);
1524 	c.features.dword11 = cpu_to_le32(dword11);
1525 
1526 	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
1527 			buffer, buflen, NVME_QID_ANY, 0, 0);
1528 	if (ret >= 0 && result)
1529 		*result = le32_to_cpu(res.u32);
1530 	return ret;
1531 }
1532 
1533 int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
1534 		      unsigned int dword11, void *buffer, size_t buflen,
1535 		      u32 *result)
1536 {
1537 	return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
1538 			     buflen, result);
1539 }
1540 EXPORT_SYMBOL_GPL(nvme_set_features);
1541 
1542 int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
1543 		      unsigned int dword11, void *buffer, size_t buflen,
1544 		      u32 *result)
1545 {
1546 	return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
1547 			     buflen, result);
1548 }
1549 EXPORT_SYMBOL_GPL(nvme_get_features);
1550 
1551 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
1552 {
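	/*
	 * NVME_FEAT_NUM_QUEUES: cdw11 carries the requested number of I/O
	 * submission queues in the lower 16 bits and completion queues in the
	 * upper 16 bits, both as 0's based values.
	 */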
1553 	u32 q_count = (*count - 1) | ((*count - 1) << 16);
1554 	u32 result;
1555 	int status, nr_io_queues;
1556 
1557 	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
1558 			&result);
1559 	if (status < 0)
1560 		return status;
1561 
1562 	/*
1563 	 * Degraded controllers might return an error when setting the queue
1564 	 * count.  We still want to be able to bring them online and offer
1565 	 * access to the admin queue, as that might be the only way to fix them up.
1566 	 */
1567 	if (status > 0) {
1568 		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
1569 		*count = 0;
1570 	} else {
1571 		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
1572 		*count = min(*count, nr_io_queues);
1573 	}
1574 
1575 	return 0;
1576 }
1577 EXPORT_SYMBOL_GPL(nvme_set_queue_count);
1578 
1579 #define NVME_AEN_SUPPORTED \
1580 	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
1581 	 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)
1582 
1583 static void nvme_enable_aen(struct nvme_ctrl *ctrl)
1584 {
1585 	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
1586 	int status;
1587 
1588 	if (!supported_aens)
1589 		return;
1590 
1591 	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
1592 			NULL, 0, &result);
1593 	if (status)
1594 		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
1595 			 supported_aens);
1596 
1597 	queue_work(nvme_wq, &ctrl->async_event_work);
1598 }
1599 
1600 static int nvme_ns_open(struct nvme_ns *ns)
1601 {
1602 
1603 	/* should never be called due to GENHD_FL_HIDDEN */
1604 	if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head)))
1605 		goto fail;
1606 	if (!nvme_get_ns(ns))
1607 		goto fail;
1608 	if (!try_module_get(ns->ctrl->ops->module))
1609 		goto fail_put_ns;
1610 
1611 	return 0;
1612 
1613 fail_put_ns:
1614 	nvme_put_ns(ns);
1615 fail:
1616 	return -ENXIO;
1617 }
1618 
1619 static void nvme_ns_release(struct nvme_ns *ns)
1620 {
1621 
1622 	module_put(ns->ctrl->ops->module);
1623 	nvme_put_ns(ns);
1624 }
1625 
1626 static int nvme_open(struct gendisk *disk, blk_mode_t mode)
1627 {
1628 	return nvme_ns_open(disk->private_data);
1629 }
1630 
1631 static void nvme_release(struct gendisk *disk)
1632 {
1633 	nvme_ns_release(disk->private_data);
1634 }
1635 
1636 int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1637 {
1638 	/* some standard values */
1639 	geo->heads = 1 << 6;
1640 	geo->sectors = 1 << 5;
1641 	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
1642 	return 0;
1643 }
1644 
1645 #ifdef CONFIG_BLK_DEV_INTEGRITY
1646 static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
1647 				u32 max_integrity_segments)
1648 {
1649 	struct blk_integrity integrity = { };
1650 
1651 	switch (ns->pi_type) {
1652 	case NVME_NS_DPS_PI_TYPE3:
1653 		switch (ns->guard_type) {
1654 		case NVME_NVM_NS_16B_GUARD:
1655 			integrity.profile = &t10_pi_type3_crc;
1656 			integrity.tag_size = sizeof(u16) + sizeof(u32);
1657 			integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1658 			break;
1659 		case NVME_NVM_NS_64B_GUARD:
1660 			integrity.profile = &ext_pi_type3_crc64;
1661 			integrity.tag_size = sizeof(u16) + 6;
1662 			integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1663 			break;
1664 		default:
1665 			integrity.profile = NULL;
1666 			break;
1667 		}
1668 		break;
1669 	case NVME_NS_DPS_PI_TYPE1:
1670 	case NVME_NS_DPS_PI_TYPE2:
1671 		switch (ns->guard_type) {
1672 		case NVME_NVM_NS_16B_GUARD:
1673 			integrity.profile = &t10_pi_type1_crc;
1674 			integrity.tag_size = sizeof(u16);
1675 			integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1676 			break;
1677 		case NVME_NVM_NS_64B_GUARD:
1678 			integrity.profile = &ext_pi_type1_crc64;
1679 			integrity.tag_size = sizeof(u16);
1680 			integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1681 			break;
1682 		default:
1683 			integrity.profile = NULL;
1684 			break;
1685 		}
1686 		break;
1687 	default:
1688 		integrity.profile = NULL;
1689 		break;
1690 	}
1691 
1692 	integrity.tuple_size = ns->ms;
1693 	blk_integrity_register(disk, &integrity);
1694 	blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
1695 }
1696 #else
1697 static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
1698 				u32 max_integrity_segments)
1699 {
1700 }
1701 #endif /* CONFIG_BLK_DEV_INTEGRITY */
1702 
1703 static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
1704 {
1705 	struct nvme_ctrl *ctrl = ns->ctrl;
1706 	struct request_queue *queue = disk->queue;
1707 	u32 size = queue_logical_block_size(queue);
1708 
1709 	if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
1710 		ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
1711 
1712 	if (ctrl->max_discard_sectors == 0) {
1713 		blk_queue_max_discard_sectors(queue, 0);
1714 		return;
1715 	}
1716 
1717 	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
1718 			NVME_DSM_MAX_RANGES);
1719 
1720 	queue->limits.discard_granularity = size;
1721 
1722 	/* If discard is already enabled, don't reset queue limits */
1723 	if (queue->limits.max_discard_sectors)
1724 		return;
1725 
1726 	blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
1727 	blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
1728 
1729 	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
1730 		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
1731 }
1732 
1733 static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
1734 {
1735 	return uuid_equal(&a->uuid, &b->uuid) &&
1736 		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
1737 		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
1738 		a->csi == b->csi;
1739 }
1740 
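/*
 * Work out the metadata size and protection information format for the
 * namespace.  When the controller supports extended LBA formats (ELBAS), the
 * NVM command set specific Identify Namespace data is queried for the guard
 * type; otherwise the 16 byte T10-PI guard is assumed.
 */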
1741 static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id)
1742 {
1743 	bool first = id->dps & NVME_NS_DPS_PI_FIRST;
1744 	unsigned lbaf = nvme_lbaf_index(id->flbas);
1745 	struct nvme_ctrl *ctrl = ns->ctrl;
1746 	struct nvme_command c = { };
1747 	struct nvme_id_ns_nvm *nvm;
1748 	int ret = 0;
1749 	u32 elbaf;
1750 
1751 	ns->pi_size = 0;
1752 	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
1753 	if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
1754 		ns->pi_size = sizeof(struct t10_pi_tuple);
1755 		ns->guard_type = NVME_NVM_NS_16B_GUARD;
1756 		goto set_pi;
1757 	}
1758 
1759 	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
1760 	if (!nvm)
1761 		return -ENOMEM;
1762 
1763 	c.identify.opcode = nvme_admin_identify;
1764 	c.identify.nsid = cpu_to_le32(ns->head->ns_id);
1765 	c.identify.cns = NVME_ID_CNS_CS_NS;
1766 	c.identify.csi = NVME_CSI_NVM;
1767 
1768 	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, nvm, sizeof(*nvm));
1769 	if (ret)
1770 		goto free_data;
1771 
1772 	elbaf = le32_to_cpu(nvm->elbaf[lbaf]);
1773 
1774 	/* no support for storage tag formats right now */
1775 	if (nvme_elbaf_sts(elbaf))
1776 		goto free_data;
1777 
1778 	ns->guard_type = nvme_elbaf_guard_type(elbaf);
1779 	switch (ns->guard_type) {
1780 	case NVME_NVM_NS_64B_GUARD:
1781 		ns->pi_size = sizeof(struct crc64_pi_tuple);
1782 		break;
1783 	case NVME_NVM_NS_16B_GUARD:
1784 		ns->pi_size = sizeof(struct t10_pi_tuple);
1785 		break;
1786 	default:
1787 		break;
1788 	}
1789 
1790 free_data:
1791 	kfree(nvm);
1792 set_pi:
1793 	if (ns->pi_size && (first || ns->ms == ns->pi_size))
1794 		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
1795 	else
1796 		ns->pi_type = 0;
1797 
1798 	return ret;
1799 }
1800 
1801 static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
1802 {
1803 	struct nvme_ctrl *ctrl = ns->ctrl;
1804 	int ret;
1805 
1806 	ret = nvme_init_ms(ns, id);
1807 	if (ret)
1808 		return ret;
1809 
1810 	ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
1811 	if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
1812 		return 0;
1813 
1814 	if (ctrl->ops->flags & NVME_F_FABRICS) {
1815 		/*
1816 		 * The NVMe over Fabrics specification only supports metadata as
1817 		 * part of the extended data LBA.  We rely on HCA/HBA support to
1818 		 * remap the separate metadata buffer from the block layer.
1819 		 */
1820 		if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
1821 			return 0;
1822 
1823 		ns->features |= NVME_NS_EXT_LBAS;
1824 
1825 		/*
1826 		 * The current fabrics transport drivers support namespace
1827 		 * metadata formats only if nvme_ns_has_pi() returns true.
1828 		 * Suppress support for all other formats so the namespace will
1829 		 * have a 0 capacity and not be usable through the block stack.
1830 		 *
1831 		 * Note, this check will need to be modified if any drivers
1832 		 * gain the ability to use other metadata formats.
1833 		 */
1834 		if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
1835 			ns->features |= NVME_NS_METADATA_SUPPORTED;
1836 	} else {
1837 		/*
1838 		 * For PCIe controllers, we can't easily remap the separate
1839 		 * metadata buffer from the block layer and thus require a
1840 		 * separate metadata buffer for block layer metadata/PI support.
1841 		 * We allow extended LBAs for the passthrough interface, though.
1842 		 */
1843 		if (id->flbas & NVME_NS_FLBAS_META_EXT)
1844 			ns->features |= NVME_NS_EXT_LBAS;
1845 		else
1846 			ns->features |= NVME_NS_METADATA_SUPPORTED;
1847 	}
1848 	return 0;
1849 }
1850 
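/*
 * Apply the controller-wide limits (maximum transfer size, segment count,
 * virtual boundary, DMA alignment) and the volatile write cache setting to
 * a request queue.
 */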
1851 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
1852 		struct request_queue *q)
1853 {
1854 	bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT;
1855 
1856 	if (ctrl->max_hw_sectors) {
1857 		u32 max_segments =
1858 			(ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;
1859 
1860 		max_segments = min_not_zero(max_segments, ctrl->max_segments);
1861 		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
1862 		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
1863 	}
1864 	blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
1865 	blk_queue_dma_alignment(q, 3);
1866 	blk_queue_write_cache(q, vwc, vwc);
1867 }
1868 
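/*
 * Update the gendisk from the Identify Namespace data: logical/physical
 * block sizes, optimal I/O size, capacity, integrity profile, and the
 * discard and write-zeroes limits.
 */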
1869 static void nvme_update_disk_info(struct gendisk *disk,
1870 		struct nvme_ns *ns, struct nvme_id_ns *id)
1871 {
1872 	sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
1873 	u32 bs = 1U << ns->lba_shift;
1874 	u32 atomic_bs, phys_bs, io_opt = 0;
1875 
1876 	/*
1877 	 * The block layer can't support LBA sizes larger than the page size
1878 	 * or smaller than a sector size yet, so catch this early and don't
1879 	 * allow block I/O.
1880 	 */
1881 	if (ns->lba_shift > PAGE_SHIFT || ns->lba_shift < SECTOR_SHIFT) {
1882 		capacity = 0;
1883 		bs = (1 << 9);
1884 	}
1885 
1886 	blk_integrity_unregister(disk);
1887 
1888 	atomic_bs = phys_bs = bs;
1889 	if (id->nabo == 0) {
1890 		/*
1891 		 * Bit 1 indicates whether NAWUPF is defined for this namespace
1892 		 * and whether it should be used instead of AWUPF. If NAWUPF ==
1893 		 * 0 then AWUPF must be used instead.
1894 		 */
1895 		if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
1896 			atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
1897 		else
1898 			atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
1899 	}
1900 
1901 	if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
1902 		/* NPWG = Namespace Preferred Write Granularity */
1903 		phys_bs = bs * (1 + le16_to_cpu(id->npwg));
1904 		/* NOWS = Namespace Optimal Write Size */
1905 		io_opt = bs * (1 + le16_to_cpu(id->nows));
1906 	}
1907 
1908 	blk_queue_logical_block_size(disk->queue, bs);
1909 	/*
1910 	 * Linux filesystems assume writing a single physical block is
1911 	 * an atomic operation. Hence limit the physical block size to the
1912 	 * value of the Atomic Write Unit Power Fail parameter.
1913 	 */
1914 	blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
1915 	blk_queue_io_min(disk->queue, phys_bs);
1916 	blk_queue_io_opt(disk->queue, io_opt);
1917 
1918 	/*
1919 	 * Register a metadata profile for PI, or the plain non-integrity NVMe
1920 	 * metadata masquerading as Type 0 if supported, otherwise reject block
1921 	 * I/O to namespaces with metadata except when the namespace supports
1922 	 * PI, as it can strip/insert in that case.
1923 	 */
1924 	if (ns->ms) {
1925 		if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
1926 		    (ns->features & NVME_NS_METADATA_SUPPORTED))
1927 			nvme_init_integrity(disk, ns,
1928 					    ns->ctrl->max_integrity_segments);
1929 		else if (!nvme_ns_has_pi(ns))
1930 			capacity = 0;
1931 	}
1932 
1933 	set_capacity_and_notify(disk, capacity);
1934 
1935 	nvme_config_discard(disk, ns);
1936 	blk_queue_max_write_zeroes_sectors(disk->queue,
1937 					   ns->ctrl->max_zeroes_sectors);
1938 }
1939 
1940 static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info)
1941 {
1942 	return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags);
1943 }
1944 
1945 static inline bool nvme_first_scan(struct gendisk *disk)
1946 {
1947 	/* nvme_alloc_ns() scans the disk prior to adding it */
1948 	return !disk_live(disk);
1949 }
1950 
1951 static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
1952 {
1953 	struct nvme_ctrl *ctrl = ns->ctrl;
1954 	u32 iob;
1955 
1956 	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
1957 	    is_power_of_2(ctrl->max_hw_sectors))
1958 		iob = ctrl->max_hw_sectors;
1959 	else
1960 		iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));
1961 
1962 	if (!iob)
1963 		return;
1964 
1965 	if (!is_power_of_2(iob)) {
1966 		if (nvme_first_scan(ns->disk))
1967 			pr_warn("%s: ignoring unaligned IO boundary:%u\n",
1968 				ns->disk->disk_name, iob);
1969 		return;
1970 	}
1971 
1972 	if (blk_queue_is_zoned(ns->disk->queue)) {
1973 		if (nvme_first_scan(ns->disk))
1974 			pr_warn("%s: ignoring zoned namespace IO boundary\n",
1975 				ns->disk->disk_name);
1976 		return;
1977 	}
1978 
1979 	blk_queue_chunk_sectors(ns->queue, iob);
1980 }
1981 
1982 static int nvme_update_ns_info_generic(struct nvme_ns *ns,
1983 		struct nvme_ns_info *info)
1984 {
1985 	blk_mq_freeze_queue(ns->disk->queue);
1986 	nvme_set_queue_limits(ns->ctrl, ns->queue);
1987 	set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
1988 	blk_mq_unfreeze_queue(ns->disk->queue);
1989 
1990 	if (nvme_ns_head_multipath(ns->head)) {
1991 		blk_mq_freeze_queue(ns->head->disk->queue);
1992 		set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
1993 		nvme_mpath_revalidate_paths(ns);
1994 		blk_stack_limits(&ns->head->disk->queue->limits,
1995 				 &ns->queue->limits, 0);
1996 		ns->head->disk->flags |= GENHD_FL_HIDDEN;
1997 		blk_mq_unfreeze_queue(ns->head->disk->queue);
1998 	}
1999 
2000 	/* Hide the block-interface for these devices */
2001 	ns->disk->flags |= GENHD_FL_HIDDEN;
2002 	set_bit(NVME_NS_READY, &ns->flags);
2003 
2004 	return 0;
2005 }
2006 
2007 static int nvme_update_ns_info_block(struct nvme_ns *ns,
2008 		struct nvme_ns_info *info)
2009 {
2010 	struct nvme_id_ns *id;
2011 	unsigned lbaf;
2012 	int ret;
2013 
2014 	ret = nvme_identify_ns(ns->ctrl, info->nsid, &id);
2015 	if (ret)
2016 		return ret;
2017 
2018 	if (id->ncap == 0) {
2019 		/* namespace not allocated or attached */
2020 		info->is_removed = true;
2021 		ret = -ENODEV;
2022 		goto error;
2023 	}
2024 
2025 	blk_mq_freeze_queue(ns->disk->queue);
2026 	lbaf = nvme_lbaf_index(id->flbas);
2027 	ns->lba_shift = id->lbaf[lbaf].ds;
2028 	nvme_set_queue_limits(ns->ctrl, ns->queue);
2029 
2030 	ret = nvme_configure_metadata(ns, id);
2031 	if (ret < 0) {
2032 		blk_mq_unfreeze_queue(ns->disk->queue);
2033 		goto out;
2034 	}
2035 	nvme_set_chunk_sectors(ns, id);
2036 	nvme_update_disk_info(ns->disk, ns, id);
2037 
2038 	if (ns->head->ids.csi == NVME_CSI_ZNS) {
2039 		ret = nvme_update_zone_info(ns, lbaf);
2040 		if (ret) {
2041 			blk_mq_unfreeze_queue(ns->disk->queue);
2042 			goto out;
2043 		}
2044 	}
2045 
2046 	/*
2047 	 * Only set the DEAC bit if the device guarantees that reads from
2048 	 * deallocated data return zeroes.  While the DEAC bit does not
2049 	 * require that, it must be a no-op if reads from deallocated data
2050 	 * do not return zeroes.
2051 	 */
2052 	if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3)))
2053 		ns->features |= NVME_NS_DEAC;
2054 	set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
2055 	set_bit(NVME_NS_READY, &ns->flags);
2056 	blk_mq_unfreeze_queue(ns->disk->queue);
2057 
2058 	if (blk_queue_is_zoned(ns->queue)) {
2059 		ret = nvme_revalidate_zones(ns);
2060 		if (ret && !nvme_first_scan(ns->disk))
2061 			goto out;
2062 	}
2063 
2064 	if (nvme_ns_head_multipath(ns->head)) {
2065 		blk_mq_freeze_queue(ns->head->disk->queue);
2066 		nvme_update_disk_info(ns->head->disk, ns, id);
2067 		set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
2068 		nvme_mpath_revalidate_paths(ns);
2069 		blk_stack_limits(&ns->head->disk->queue->limits,
2070 				 &ns->queue->limits, 0);
2071 		disk_update_readahead(ns->head->disk);
2072 		blk_mq_unfreeze_queue(ns->head->disk->queue);
2073 	}
2074 
2075 	ret = 0;
2076 out:
2077 	/*
2078 	 * If probing fails due to an unsupported feature, hide the block device,
2079 	 * but still allow other access.
2080 	 */
2081 	if (ret == -ENODEV) {
2082 		ns->disk->flags |= GENHD_FL_HIDDEN;
2083 		set_bit(NVME_NS_READY, &ns->flags);
2084 		ret = 0;
2085 	}
2086 
2087 error:
2088 	kfree(id);
2089 	return ret;
2090 }
2091 
2092 static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
2093 {
2094 	switch (info->ids.csi) {
2095 	case NVME_CSI_ZNS:
2096 		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
2097 			dev_info(ns->ctrl->device,
2098 	"block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
2099 				info->nsid);
2100 			return nvme_update_ns_info_generic(ns, info);
2101 		}
2102 		return nvme_update_ns_info_block(ns, info);
2103 	case NVME_CSI_NVM:
2104 		return nvme_update_ns_info_block(ns, info);
2105 	default:
2106 		dev_info(ns->ctrl->device,
2107 			"block device for nsid %u not supported (csi %u)\n",
2108 			info->nsid, info->ids.csi);
2109 		return nvme_update_ns_info_generic(ns, info);
2110 	}
2111 }
2112 
2113 #ifdef CONFIG_BLK_SED_OPAL
2114 static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
2115 		bool send)
2116 {
2117 	struct nvme_ctrl *ctrl = data;
2118 	struct nvme_command cmd = { };
2119 
2120 	if (send)
2121 		cmd.common.opcode = nvme_admin_security_send;
2122 	else
2123 		cmd.common.opcode = nvme_admin_security_recv;
2124 	cmd.common.nsid = 0;
2125 	cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
2126 	cmd.common.cdw11 = cpu_to_le32(len);
2127 
2128 	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
2129 			NVME_QID_ANY, 1, 0);
2130 }
2131 
2132 static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
2133 {
2134 	if (ctrl->oacs & NVME_CTRL_OACS_SEC_SUPP) {
2135 		if (!ctrl->opal_dev)
2136 			ctrl->opal_dev = init_opal_dev(ctrl, &nvme_sec_submit);
2137 		else if (was_suspended)
2138 			opal_unlock_from_suspend(ctrl->opal_dev);
2139 	} else {
2140 		free_opal_dev(ctrl->opal_dev);
2141 		ctrl->opal_dev = NULL;
2142 	}
2143 }
2144 #else
2145 static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
2146 {
2147 }
2148 #endif /* CONFIG_BLK_SED_OPAL */
2149 
2150 #ifdef CONFIG_BLK_DEV_ZONED
2151 static int nvme_report_zones(struct gendisk *disk, sector_t sector,
2152 		unsigned int nr_zones, report_zones_cb cb, void *data)
2153 {
2154 	return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb,
2155 			data);
2156 }
2157 #else
2158 #define nvme_report_zones	NULL
2159 #endif /* CONFIG_BLK_DEV_ZONED */
2160 
2161 const struct block_device_operations nvme_bdev_ops = {
2162 	.owner		= THIS_MODULE,
2163 	.ioctl		= nvme_ioctl,
2164 	.compat_ioctl	= blkdev_compat_ptr_ioctl,
2165 	.open		= nvme_open,
2166 	.release	= nvme_release,
2167 	.getgeo		= nvme_getgeo,
2168 	.report_zones	= nvme_report_zones,
2169 	.pr_ops		= &nvme_pr_ops,
2170 };
2171 
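/*
 * Poll CSTS until the bits selected by @mask read back as @val or the
 * timeout (in seconds) expires.  Returns -ENODEV if the controller has
 * gone away or never reaches the expected state, and -EINTR if a fatal
 * signal is pending.
 */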
2172 static int nvme_wait_ready(struct nvme_ctrl *ctrl, u32 mask, u32 val,
2173 		u32 timeout, const char *op)
2174 {
2175 	unsigned long timeout_jiffies = jiffies + timeout * HZ;
2176 	u32 csts;
2177 	int ret;
2178 
2179 	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
2180 		if (csts == ~0)
2181 			return -ENODEV;
2182 		if ((csts & mask) == val)
2183 			break;
2184 
2185 		usleep_range(1000, 2000);
2186 		if (fatal_signal_pending(current))
2187 			return -EINTR;
2188 		if (time_after(jiffies, timeout_jiffies)) {
2189 			dev_err(ctrl->device,
2190 				"Device not ready; aborting %s, CSTS=0x%x\n",
2191 				op, csts);
2192 			return -ENODEV;
2193 		}
2194 	}
2195 
2196 	return ret;
2197 }
2198 
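/*
 * Clear CC.EN, or request a normal shutdown if @shutdown is set, and wait
 * for the controller to report the corresponding state in CSTS.
 */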
2199 int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2200 {
2201 	int ret;
2202 
2203 	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
2204 	if (shutdown)
2205 		ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
2206 	else
2207 		ctrl->ctrl_config &= ~NVME_CC_ENABLE;
2208 
2209 	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2210 	if (ret)
2211 		return ret;
2212 
2213 	if (shutdown) {
2214 		return nvme_wait_ready(ctrl, NVME_CSTS_SHST_MASK,
2215 				       NVME_CSTS_SHST_CMPLT,
2216 				       ctrl->shutdown_timeout, "shutdown");
2217 	}
2218 	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
2219 		msleep(NVME_QUIRK_DELAY_AMOUNT);
2220 	return nvme_wait_ready(ctrl, NVME_CSTS_RDY, 0,
2221 			       (NVME_CAP_TIMEOUT(ctrl->cap) + 1) / 2, "reset");
2222 }
2223 EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
2224 
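/*
 * Program CC from the capabilities register (command set, page size,
 * arbitration and queue entry sizes), set CC.EN, and wait for CSTS.RDY.
 * When the controller reports ready timeouts in CRTO, the larger of
 * CAP.TO and the CRTO value is used as the wait limit.
 */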
2225 int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
2226 {
2227 	unsigned dev_page_min;
2228 	u32 timeout;
2229 	int ret;
2230 
2231 	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
2232 	if (ret) {
2233 		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
2234 		return ret;
2235 	}
2236 	dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;
2237 
2238 	if (NVME_CTRL_PAGE_SHIFT < dev_page_min) {
2239 		dev_err(ctrl->device,
2240 			"Minimum device page size %u too large for host (%u)\n",
2241 			1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT);
2242 		return -ENODEV;
2243 	}
2244 
2245 	if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
2246 		ctrl->ctrl_config = NVME_CC_CSS_CSI;
2247 	else
2248 		ctrl->ctrl_config = NVME_CC_CSS_NVM;
2249 
2250 	if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS)
2251 		ctrl->ctrl_config |= NVME_CC_CRIME;
2252 
2253 	ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
2254 	ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
2255 	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
2256 	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2257 	if (ret)
2258 		return ret;
2259 
2260 	/* Flush write to device (required if transport is PCI) */
2261 	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config);
2262 	if (ret)
2263 		return ret;
2264 
2265 	/* CAP value may change after initial CC write */
2266 	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
2267 	if (ret)
2268 		return ret;
2269 
2270 	timeout = NVME_CAP_TIMEOUT(ctrl->cap);
2271 	if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
2272 		u32 crto, ready_timeout;
2273 
2274 		ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
2275 		if (ret) {
2276 			dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
2277 				ret);
2278 			return ret;
2279 		}
2280 
2281 		/*
2282 		 * CRTO should always be greater than or equal to CAP.TO, but some
2283 		 * devices are known to get this wrong. Use the larger of the
2284 		 * two values.
2285 		 */
2286 		if (ctrl->ctrl_config & NVME_CC_CRIME)
2287 			ready_timeout = NVME_CRTO_CRIMT(crto);
2288 		else
2289 			ready_timeout = NVME_CRTO_CRWMT(crto);
2290 
2291 		if (ready_timeout < timeout)
2292 			dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n",
2293 				      crto, ctrl->cap);
2294 		else
2295 			timeout = ready_timeout;
2296 	}
2297 
2298 	ctrl->ctrl_config |= NVME_CC_ENABLE;
2299 	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2300 	if (ret)
2301 		return ret;
2302 	return nvme_wait_ready(ctrl, NVME_CSTS_RDY, NVME_CSTS_RDY,
2303 			       (timeout + 1) / 2, "initialisation");
2304 }
2305 EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
2306 
2307 static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
2308 {
2309 	__le64 ts;
2310 	int ret;
2311 
2312 	if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
2313 		return 0;
2314 
2315 	ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
2316 	ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
2317 			NULL);
2318 	if (ret)
2319 		dev_warn_once(ctrl->device,
2320 			"could not set timestamp (%d)\n", ret);
2321 	return ret;
2322 }
2323 
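/*
 * Enable the optional host behavior features the controller advertises:
 * advanced command retry (when a command retry delay is reported) and
 * extended LBA format support.
 */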
2324 static int nvme_configure_host_options(struct nvme_ctrl *ctrl)
2325 {
2326 	struct nvme_feat_host_behavior *host;
2327 	u8 acre = 0, lbafee = 0;
2328 	int ret;
2329 
2330 	/* Don't bother enabling the feature if retry delay is not reported */
2331 	if (ctrl->crdt[0])
2332 		acre = NVME_ENABLE_ACRE;
2333 	if (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)
2334 		lbafee = NVME_ENABLE_LBAFEE;
2335 
2336 	if (!acre && !lbafee)
2337 		return 0;
2338 
2339 	host = kzalloc(sizeof(*host), GFP_KERNEL);
2340 	if (!host)
2341 		return 0;
2342 
2343 	host->acre = acre;
2344 	host->lbafee = lbafee;
2345 	ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
2346 				host, sizeof(*host), NULL);
2347 	kfree(host);
2348 	return ret;
2349 }
2350 
2351 /*
2352  * The function checks whether the given total (exlat + enlat) latency of
2353  * a power state allows the latter to be used as an APST transition target.
2354  * It does so by comparing the latency to the primary and secondary latency
2355  * tolerances defined by module params. If there's a match, the corresponding
2356  * timeout value is returned and the matching tolerance index (1 or 2) is
2357  * reported.
2358  */
2359 static bool nvme_apst_get_transition_time(u64 total_latency,
2360 		u64 *transition_time, unsigned *last_index)
2361 {
2362 	if (total_latency <= apst_primary_latency_tol_us) {
2363 		if (*last_index == 1)
2364 			return false;
2365 		*last_index = 1;
2366 		*transition_time = apst_primary_timeout_ms;
2367 		return true;
2368 	}
2369 	if (apst_secondary_timeout_ms &&
2370 		total_latency <= apst_secondary_latency_tol_us) {
2371 		if (*last_index <= 2)
2372 			return false;
2373 		*last_index = 2;
2374 		*transition_time = apst_secondary_timeout_ms;
2375 		return true;
2376 	}
2377 	return false;
2378 }
2379 
2380 /*
2381  * APST (Autonomous Power State Transition) lets us program a table of power
2382  * state transitions that the controller will perform automatically.
2383  *
2384  * Depending on module params, one of the two supported techniques will be used:
2385  *
2386  * - If the parameters provide explicit timeouts and tolerances, they will be
2387  *   used to build a table with up to 2 non-operational states to transition to.
2388  *   The default parameter values were selected based on the values used by
2389  *   Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic
2390  *   regeneration of the APST table in the event of switching between external
2391  *   and battery power, the timeouts and tolerances reflect a compromise
2392  *   between values used by Microsoft for AC and battery scenarios.
2393  * - If not, we'll configure the table with a simple heuristic: we are willing
2394  *   to spend at most 2% of the time transitioning between power states.
2395  *   Therefore, when running in any given state, we will enter the next
2396  *   lower-power non-operational state after waiting 50 * (enlat + exlat)
2397  *   microseconds, as long as that state's exit latency is under the requested
2398  *   maximum latency.
2399  *
2400  * We will not autonomously enter any non-operational state for which the total
2401  * latency exceeds ps_max_latency_us.
2402  *
2403  * Users can set ps_max_latency_us to zero to turn off APST.
2404  */
2405 static int nvme_configure_apst(struct nvme_ctrl *ctrl)
2406 {
2407 	struct nvme_feat_auto_pst *table;
2408 	unsigned apste = 0;
2409 	u64 max_lat_us = 0;
2410 	__le64 target = 0;
2411 	int max_ps = -1;
2412 	int state;
2413 	int ret;
2414 	unsigned last_lt_index = UINT_MAX;
2415 
2416 	/*
2417 	 * If APST isn't supported or if we haven't been initialized yet,
2418 	 * then don't do anything.
2419 	 */
2420 	if (!ctrl->apsta)
2421 		return 0;
2422 
2423 	if (ctrl->npss > 31) {
2424 		dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
2425 		return 0;
2426 	}
2427 
2428 	table = kzalloc(sizeof(*table), GFP_KERNEL);
2429 	if (!table)
2430 		return 0;
2431 
2432 	if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
2433 		/* Turn off APST. */
2434 		dev_dbg(ctrl->device, "APST disabled\n");
2435 		goto done;
2436 	}
2437 
2438 	/*
2439 	 * Walk through all states from lowest- to highest-power.
2440 	 * According to the spec, lower-numbered states use more power.  NPSS,
2441 	 * despite the name, is the index of the lowest-power state, not the
2442 	 * number of states.
2443 	 */
2444 	for (state = (int)ctrl->npss; state >= 0; state--) {
2445 		u64 total_latency_us, exit_latency_us, transition_ms;
2446 
2447 		if (target)
2448 			table->entries[state] = target;
2449 
2450 		/*
2451 		 * Don't allow transitions to the deepest state if it's quirked
2452 		 * off.
2453 		 */
2454 		if (state == ctrl->npss &&
2455 		    (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
2456 			continue;
2457 
2458 		/*
2459 		 * Is this state a useful non-operational state for higher-power
2460 		 * states to autonomously transition to?
2461 		 */
2462 		if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE))
2463 			continue;
2464 
2465 		exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
2466 		if (exit_latency_us > ctrl->ps_max_latency_us)
2467 			continue;
2468 
2469 		total_latency_us = exit_latency_us +
2470 			le32_to_cpu(ctrl->psd[state].entry_lat);
2471 
2472 		/*
2473 		 * This state is good. It can be used as the APST idle target
2474 		 * for higher power states.
2475 		 */
2476 		if (apst_primary_timeout_ms && apst_primary_latency_tol_us) {
2477 			if (!nvme_apst_get_transition_time(total_latency_us,
2478 					&transition_ms, &last_lt_index))
2479 				continue;
2480 		} else {
2481 			transition_ms = total_latency_us + 19;
2482 			do_div(transition_ms, 20);
2483 			if (transition_ms > (1 << 24) - 1)
2484 				transition_ms = (1 << 24) - 1;
2485 		}
2486 
2487 		target = cpu_to_le64((state << 3) | (transition_ms << 8));
2488 		if (max_ps == -1)
2489 			max_ps = state;
2490 		if (total_latency_us > max_lat_us)
2491 			max_lat_us = total_latency_us;
2492 	}
2493 
2494 	if (max_ps == -1)
2495 		dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
2496 	else
2497 		dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
2498 			max_ps, max_lat_us, (int)sizeof(*table), table);
2499 	apste = 1;
2500 
2501 done:
2502 	ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
2503 				table, sizeof(*table), NULL);
2504 	if (ret)
2505 		dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
2506 	kfree(table);
2507 	return ret;
2508 }
2509 
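/*
 * PM QoS latency tolerance callback: translate the requested value into
 * ps_max_latency_us and reprogram the APST table if the limit changed
 * while the controller is live.
 */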
2510 static void nvme_set_latency_tolerance(struct device *dev, s32 val)
2511 {
2512 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2513 	u64 latency;
2514 
2515 	switch (val) {
2516 	case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
2517 	case PM_QOS_LATENCY_ANY:
2518 		latency = U64_MAX;
2519 		break;
2520 
2521 	default:
2522 		latency = val;
2523 	}
2524 
2525 	if (ctrl->ps_max_latency_us != latency) {
2526 		ctrl->ps_max_latency_us = latency;
2527 		if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE)
2528 			nvme_configure_apst(ctrl);
2529 	}
2530 }
2531 
2532 struct nvme_core_quirk_entry {
2533 	/*
2534 	 * NVMe model and firmware strings are padded with spaces.  For
2535 	 * simplicity, strings in the quirk table are padded with NULLs
2536 	 * instead.
2537 	 */
2538 	u16 vid;
2539 	const char *mn;
2540 	const char *fr;
2541 	unsigned long quirks;
2542 };
2543 
2544 static const struct nvme_core_quirk_entry core_quirks[] = {
2545 	{
2546 		/*
2547 		 * This Toshiba device seems to die using any APST states.  See:
2548 		 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
2549 		 */
2550 		.vid = 0x1179,
2551 		.mn = "THNSF5256GPUK TOSHIBA",
2552 		.quirks = NVME_QUIRK_NO_APST,
2553 	},
2554 	{
2555 		/*
2556 		 * This LiteON CL1-3D*-Q11 firmware version has a race
2557 		 * condition associated with actions related to suspend to idle.
2558 		 * LiteON has resolved the problem in future firmware.
2559 		 */
2560 		.vid = 0x14a4,
2561 		.fr = "22301111",
2562 		.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
2563 	},
2564 	{
2565 		/*
2566 		 * This Kioxia CD6-V Series / HPE PE8030 device times out and
2567 		 * aborts I/O during any load, but more easily reproducible
2568 		 * with discards (fstrim).
2569 		 *
2570 		 * The device is left in a state where it is also not possible
2571 		 * to use "nvme set-feature" to disable APST, but booting with
2572 		 * nvme_core.default_ps_max_latency=0 works.
2573 		 */
2574 		.vid = 0x1e0f,
2575 		.mn = "KCD6XVUL6T40",
2576 		.quirks = NVME_QUIRK_NO_APST,
2577 	},
2578 	{
2579 		/*
2580 		 * The external Samsung X5 SSD fails initialization without a
2581 		 * delay before checking if it is ready and has a whole set of
2582 		 * other problems.  To make this even more interesting, it
2583 		 * shares the PCI ID with internal Samsung 970 Evo Plus that
2584 		 * does not need or want these quirks.
2585 		 */
2586 		.vid = 0x144d,
2587 		.mn = "Samsung Portable SSD X5",
2588 		.quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
2589 			  NVME_QUIRK_NO_DEEPEST_PS |
2590 			  NVME_QUIRK_IGNORE_DEV_SUBNQN,
2591 	}
2592 };
2593 
2594 /* match is null-terminated but idstr is space-padded. */
2595 static bool string_matches(const char *idstr, const char *match, size_t len)
2596 {
2597 	size_t matchlen;
2598 
2599 	if (!match)
2600 		return true;
2601 
2602 	matchlen = strlen(match);
2603 	WARN_ON_ONCE(matchlen > len);
2604 
2605 	if (memcmp(idstr, match, matchlen))
2606 		return false;
2607 
2608 	for (; matchlen < len; matchlen++)
2609 		if (idstr[matchlen] != ' ')
2610 			return false;
2611 
2612 	return true;
2613 }
2614 
2615 static bool quirk_matches(const struct nvme_id_ctrl *id,
2616 			  const struct nvme_core_quirk_entry *q)
2617 {
2618 	return q->vid == le16_to_cpu(id->vid) &&
2619 		string_matches(id->mn, q->mn, sizeof(id->mn)) &&
2620 		string_matches(id->fr, q->fr, sizeof(id->fr));
2621 }
2622 
2623 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
2624 		struct nvme_id_ctrl *id)
2625 {
2626 	size_t nqnlen;
2627 	int off;
2628 
2629 	if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
2630 		nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
2631 		if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
2632 			strscpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
2633 			return;
2634 		}
2635 
2636 		if (ctrl->vs >= NVME_VS(1, 2, 1))
2637 			dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
2638 	}
2639 
2640 	/*
2641 	 * Generate a "fake" NQN similar to the one in Section 4.5 of the NVMe
2642 	 * Base Specification 2.0.  It is slightly different from the format
2643 	 * specified there due to historic reasons, and we can't change it now.
2644 	 */
2645 	off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
2646 			"nqn.2014.08.org.nvmexpress:%04x%04x",
2647 			le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
2648 	memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
2649 	off += sizeof(id->sn);
2650 	memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
2651 	off += sizeof(id->mn);
2652 	memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
2653 }
2654 
2655 static void nvme_release_subsystem(struct device *dev)
2656 {
2657 	struct nvme_subsystem *subsys =
2658 		container_of(dev, struct nvme_subsystem, dev);
2659 
2660 	if (subsys->instance >= 0)
2661 		ida_free(&nvme_instance_ida, subsys->instance);
2662 	kfree(subsys);
2663 }
2664 
2665 static void nvme_destroy_subsystem(struct kref *ref)
2666 {
2667 	struct nvme_subsystem *subsys =
2668 			container_of(ref, struct nvme_subsystem, ref);
2669 
2670 	mutex_lock(&nvme_subsystems_lock);
2671 	list_del(&subsys->entry);
2672 	mutex_unlock(&nvme_subsystems_lock);
2673 
2674 	ida_destroy(&subsys->ns_ida);
2675 	device_del(&subsys->dev);
2676 	put_device(&subsys->dev);
2677 }
2678 
2679 static void nvme_put_subsystem(struct nvme_subsystem *subsys)
2680 {
2681 	kref_put(&subsys->ref, nvme_destroy_subsystem);
2682 }
2683 
2684 static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
2685 {
2686 	struct nvme_subsystem *subsys;
2687 
2688 	lockdep_assert_held(&nvme_subsystems_lock);
2689 
2690 	/*
2691 	 * Fail matches for discovery subsystems. This results
2692 	 * in each discovery controller being bound to a unique subsystem.
2693 	 * This avoids issues with validating controller values
2694 	 * that can only be true when there is a single unique subsystem.
2695 	 * There may be multiple and completely independent entities
2696 	 * that provide discovery controllers.
2697 	 */
2698 	if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
2699 		return NULL;
2700 
2701 	list_for_each_entry(subsys, &nvme_subsystems, entry) {
2702 		if (strcmp(subsys->subnqn, subsysnqn))
2703 			continue;
2704 		if (!kref_get_unless_zero(&subsys->ref))
2705 			continue;
2706 		return subsys;
2707 	}
2708 
2709 	return NULL;
2710 }
2711 
2712 static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
2713 {
2714 	return ctrl->opts && ctrl->opts->discovery_nqn;
2715 }
2716 
2717 static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
2718 		struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2719 {
2720 	struct nvme_ctrl *tmp;
2721 
2722 	lockdep_assert_held(&nvme_subsystems_lock);
2723 
2724 	list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
2725 		if (nvme_state_terminal(tmp))
2726 			continue;
2727 
2728 		if (tmp->cntlid == ctrl->cntlid) {
2729 			dev_err(ctrl->device,
2730 				"Duplicate cntlid %u with %s, subsys %s, rejecting\n",
2731 				ctrl->cntlid, dev_name(tmp->device),
2732 				subsys->subnqn);
2733 			return false;
2734 		}
2735 
2736 		if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
2737 		    nvme_discovery_ctrl(ctrl))
2738 			continue;
2739 
2740 		dev_err(ctrl->device,
2741 			"Subsystem does not support multiple controllers\n");
2742 		return false;
2743 	}
2744 
2745 	return true;
2746 }
2747 
2748 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2749 {
2750 	struct nvme_subsystem *subsys, *found;
2751 	int ret;
2752 
2753 	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
2754 	if (!subsys)
2755 		return -ENOMEM;
2756 
2757 	subsys->instance = -1;
2758 	mutex_init(&subsys->lock);
2759 	kref_init(&subsys->ref);
2760 	INIT_LIST_HEAD(&subsys->ctrls);
2761 	INIT_LIST_HEAD(&subsys->nsheads);
2762 	nvme_init_subnqn(subsys, ctrl, id);
2763 	memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
2764 	memcpy(subsys->model, id->mn, sizeof(subsys->model));
2765 	subsys->vendor_id = le16_to_cpu(id->vid);
2766 	subsys->cmic = id->cmic;
2767 
2768 	/* Versions prior to 1.4 don't necessarily report a valid type */
2769 	if (id->cntrltype == NVME_CTRL_DISC ||
2770 	    !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME))
2771 		subsys->subtype = NVME_NQN_DISC;
2772 	else
2773 		subsys->subtype = NVME_NQN_NVME;
2774 
2775 	if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) {
2776 		dev_err(ctrl->device,
2777 			"Subsystem %s is not a discovery controller",
2778 			subsys->subnqn);
2779 		kfree(subsys);
2780 		return -EINVAL;
2781 	}
2782 	subsys->awupf = le16_to_cpu(id->awupf);
2783 	nvme_mpath_default_iopolicy(subsys);
2784 
2785 	subsys->dev.class = nvme_subsys_class;
2786 	subsys->dev.release = nvme_release_subsystem;
2787 	subsys->dev.groups = nvme_subsys_attrs_groups;
2788 	dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
2789 	device_initialize(&subsys->dev);
2790 
2791 	mutex_lock(&nvme_subsystems_lock);
2792 	found = __nvme_find_get_subsystem(subsys->subnqn);
2793 	if (found) {
2794 		put_device(&subsys->dev);
2795 		subsys = found;
2796 
2797 		if (!nvme_validate_cntlid(subsys, ctrl, id)) {
2798 			ret = -EINVAL;
2799 			goto out_put_subsystem;
2800 		}
2801 	} else {
2802 		ret = device_add(&subsys->dev);
2803 		if (ret) {
2804 			dev_err(ctrl->device,
2805 				"failed to register subsystem device.\n");
2806 			put_device(&subsys->dev);
2807 			goto out_unlock;
2808 		}
2809 		ida_init(&subsys->ns_ida);
2810 		list_add_tail(&subsys->entry, &nvme_subsystems);
2811 	}
2812 
2813 	ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
2814 				dev_name(ctrl->device));
2815 	if (ret) {
2816 		dev_err(ctrl->device,
2817 			"failed to create sysfs link from subsystem.\n");
2818 		goto out_put_subsystem;
2819 	}
2820 
2821 	if (!found)
2822 		subsys->instance = ctrl->instance;
2823 	ctrl->subsys = subsys;
2824 	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
2825 	mutex_unlock(&nvme_subsystems_lock);
2826 	return 0;
2827 
2828 out_put_subsystem:
2829 	nvme_put_subsystem(subsys);
2830 out_unlock:
2831 	mutex_unlock(&nvme_subsystems_lock);
2832 	return ret;
2833 }
2834 
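/*
 * Issue a Get Log Page command for @log_page (with the given LSP and CSI)
 * and copy @size bytes of the log, starting at @offset, into @log.
 */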
2835 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
2836 		void *log, size_t size, u64 offset)
2837 {
2838 	struct nvme_command c = { };
2839 	u32 dwlen = nvme_bytes_to_numd(size);
2840 
2841 	c.get_log_page.opcode = nvme_admin_get_log_page;
2842 	c.get_log_page.nsid = cpu_to_le32(nsid);
2843 	c.get_log_page.lid = log_page;
2844 	c.get_log_page.lsp = lsp;
2845 	c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
2846 	c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
2847 	c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
2848 	c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
2849 	c.get_log_page.csi = csi;
2850 
2851 	return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
2852 }
2853 
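/*
 * Return the command effects log for the given command set identifier,
 * fetching it from the controller and caching it in ctrl->cels on first
 * use.
 */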
2854 static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
2855 				struct nvme_effects_log **log)
2856 {
2857 	struct nvme_effects_log	*cel = xa_load(&ctrl->cels, csi);
2858 	int ret;
2859 
2860 	if (cel)
2861 		goto out;
2862 
2863 	cel = kzalloc(sizeof(*cel), GFP_KERNEL);
2864 	if (!cel)
2865 		return -ENOMEM;
2866 
2867 	ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
2868 			cel, sizeof(*cel), 0);
2869 	if (ret) {
2870 		kfree(cel);
2871 		return ret;
2872 	}
2873 
2874 	xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
2875 out:
2876 	*log = cel;
2877 	return 0;
2878 }
2879 
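/*
 * Convert a transfer size given in units of the controller's minimum page
 * size (e.g. MDTS or WZSL) into 512-byte sectors, saturating at UINT_MAX
 * on overflow.
 */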
2880 static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units)
2881 {
2882 	u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val;
2883 
2884 	if (check_shl_overflow(1U, units + page_shift - 9, &val))
2885 		return UINT_MAX;
2886 	return val;
2887 }
2888 
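/*
 * Set up the discard and write-zeroes limits that are not derived from
 * MDTS, refining them with the NVM command set specific Identify
 * Controller data where the controller provides it.
 */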
2889 static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
2890 {
2891 	struct nvme_command c = { };
2892 	struct nvme_id_ctrl_nvm *id;
2893 	int ret;
2894 
2895 	if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
2896 		ctrl->max_discard_sectors = UINT_MAX;
2897 		ctrl->max_discard_segments = NVME_DSM_MAX_RANGES;
2898 	} else {
2899 		ctrl->max_discard_sectors = 0;
2900 		ctrl->max_discard_segments = 0;
2901 	}
2902 
2903 	/*
2904 	 * Even though the NVMe spec explicitly states that MDTS is not
2905 	 * applicable to write-zeroes, we are cautious and limit the size to the
2906 	 * controller's max_hw_sectors value, which is based on the MDTS field
2907 	 * and possibly other limiting factors.
2908 	 */
2909 	if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
2910 	    !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
2911 		ctrl->max_zeroes_sectors = ctrl->max_hw_sectors;
2912 	else
2913 		ctrl->max_zeroes_sectors = 0;
2914 
2915 	if (ctrl->subsys->subtype != NVME_NQN_NVME ||
2916 	    nvme_ctrl_limited_cns(ctrl) ||
2917 	    test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags))
2918 		return 0;
2919 
2920 	id = kzalloc(sizeof(*id), GFP_KERNEL);
2921 	if (!id)
2922 		return -ENOMEM;
2923 
2924 	c.identify.opcode = nvme_admin_identify;
2925 	c.identify.cns = NVME_ID_CNS_CS_CTRL;
2926 	c.identify.csi = NVME_CSI_NVM;
2927 
2928 	ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
2929 	if (ret)
2930 		goto free_data;
2931 
2932 	if (id->dmrl)
2933 		ctrl->max_discard_segments = id->dmrl;
2934 	ctrl->dmrsl = le32_to_cpu(id->dmrsl);
2935 	if (id->wzsl)
2936 		ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);
2937 
2938 free_data:
2939 	if (ret > 0)
2940 		set_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags);
2941 	kfree(id);
2942 	return ret;
2943 }
2944 
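/*
 * Fold in command effects that these commands inherently have but that
 * some controllers fail to report in the effects log, and relax the
 * Security Receive entry as explained below.
 */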
2945 static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl)
2946 {
2947 	struct nvme_effects_log	*log = ctrl->effects;
2948 
2949 	log->acs[nvme_admin_format_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC |
2950 						NVME_CMD_EFFECTS_NCC |
2951 						NVME_CMD_EFFECTS_CSE_MASK);
2952 	log->acs[nvme_admin_sanitize_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC |
2953 						NVME_CMD_EFFECTS_CSE_MASK);
2954 
2955 	/*
2956 	 * The spec says the result of a security receive command depends on
2957 	 * the previous security send command. As such, many vendors log this
2958 	 * command as one to be submitted only when no other commands to the same
2959 	 * namespace are outstanding. The intention is to tell the host to
2960 	 * prevent mixing security send and receive.
2961 	 *
2962 	 * This driver can only enforce such exclusive access against IO
2963 	 * queues, though. We are not readily able to enforce such a rule for
2964 	 * two commands to the admin queue, which is the only queue that
2965 	 * matters for this command.
2966 	 *
2967 	 * Rather than blindly freezing the IO queues for this effect that
2968 	 * doesn't even apply to IO, mask it off.
2969 	 */
2970 	log->acs[nvme_admin_security_recv] &= cpu_to_le32(~NVME_CMD_EFFECTS_CSE_MASK);
2971 
2972 	log->iocs[nvme_cmd_write] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
2973 	log->iocs[nvme_cmd_write_zeroes] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
2974 	log->iocs[nvme_cmd_write_uncor] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
2975 }
2976 
2977 static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2978 {
2979 	int ret = 0;
2980 
2981 	if (ctrl->effects)
2982 		return 0;
2983 
2984 	if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
2985 		ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
2986 		if (ret < 0)
2987 			return ret;
2988 	}
2989 
2990 	if (!ctrl->effects) {
2991 		ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
2992 		if (!ctrl->effects)
2993 			return -ENOMEM;
2994 		xa_store(&ctrl->cels, NVME_CSI_NVM, ctrl->effects, GFP_KERNEL);
2995 	}
2996 
2997 	nvme_init_known_nvm_effects(ctrl);
2998 	return 0;
2999 }
3000 
3001 static int nvme_init_identify(struct nvme_ctrl *ctrl)
3002 {
3003 	struct nvme_id_ctrl *id;
3004 	u32 max_hw_sectors;
3005 	bool prev_apst_enabled;
3006 	int ret;
3007 
3008 	ret = nvme_identify_ctrl(ctrl, &id);
3009 	if (ret) {
3010 		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
3011 		return -EIO;
3012 	}
3013 
3014 	if (!(ctrl->ops->flags & NVME_F_FABRICS))
3015 		ctrl->cntlid = le16_to_cpu(id->cntlid);
3016 
3017 	if (!ctrl->identified) {
3018 		unsigned int i;
3019 
3020 		/*
3021 		 * Check for quirks.  Quirks can depend on firmware version,
3022 		 * so, in principle, the set of quirks present can change
3023 		 * across a reset.  As a possible future enhancement, we
3024 		 * could re-scan for quirks every time we reinitialize
3025 		 * the device, but we'd have to make sure that the driver
3026 		 * behaves intelligently if the quirks change.
3027 		 */
3028 		for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
3029 			if (quirk_matches(id, &core_quirks[i]))
3030 				ctrl->quirks |= core_quirks[i].quirks;
3031 		}
3032 
3033 		ret = nvme_init_subsystem(ctrl, id);
3034 		if (ret)
3035 			goto out_free;
3036 
3037 		ret = nvme_init_effects(ctrl, id);
3038 		if (ret)
3039 			goto out_free;
3040 	}
3041 	memcpy(ctrl->subsys->firmware_rev, id->fr,
3042 	       sizeof(ctrl->subsys->firmware_rev));
3043 
3044 	if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
3045 		dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
3046 		ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
3047 	}
3048 
3049 	ctrl->crdt[0] = le16_to_cpu(id->crdt1);
3050 	ctrl->crdt[1] = le16_to_cpu(id->crdt2);
3051 	ctrl->crdt[2] = le16_to_cpu(id->crdt3);
3052 
3053 	ctrl->oacs = le16_to_cpu(id->oacs);
3054 	ctrl->oncs = le16_to_cpu(id->oncs);
3055 	ctrl->mtfa = le16_to_cpu(id->mtfa);
3056 	ctrl->oaes = le32_to_cpu(id->oaes);
3057 	ctrl->wctemp = le16_to_cpu(id->wctemp);
3058 	ctrl->cctemp = le16_to_cpu(id->cctemp);
3059 
3060 	atomic_set(&ctrl->abort_limit, id->acl + 1);
3061 	ctrl->vwc = id->vwc;
3062 	if (id->mdts)
3063 		max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts);
3064 	else
3065 		max_hw_sectors = UINT_MAX;
3066 	ctrl->max_hw_sectors =
3067 		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
3068 
3069 	nvme_set_queue_limits(ctrl, ctrl->admin_q);
3070 	ctrl->sgls = le32_to_cpu(id->sgls);
3071 	ctrl->kas = le16_to_cpu(id->kas);
3072 	ctrl->max_namespaces = le32_to_cpu(id->mnan);
3073 	ctrl->ctratt = le32_to_cpu(id->ctratt);
3074 
3075 	ctrl->cntrltype = id->cntrltype;
3076 	ctrl->dctype = id->dctype;
3077 
3078 	if (id->rtd3e) {
3079 		/* us -> s */
3080 		u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC;
3081 
3082 		ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
3083 						 shutdown_timeout, 60);
3084 
3085 		if (ctrl->shutdown_timeout != shutdown_timeout)
3086 			dev_info(ctrl->device,
3087 				 "Shutdown timeout set to %u seconds\n",
3088 				 ctrl->shutdown_timeout);
3089 	} else
3090 		ctrl->shutdown_timeout = shutdown_timeout;
3091 
3092 	ctrl->npss = id->npss;
3093 	ctrl->apsta = id->apsta;
3094 	prev_apst_enabled = ctrl->apst_enabled;
3095 	if (ctrl->quirks & NVME_QUIRK_NO_APST) {
3096 		if (force_apst && id->apsta) {
3097 			dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
3098 			ctrl->apst_enabled = true;
3099 		} else {
3100 			ctrl->apst_enabled = false;
3101 		}
3102 	} else {
3103 		ctrl->apst_enabled = id->apsta;
3104 	}
3105 	memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
3106 
3107 	if (ctrl->ops->flags & NVME_F_FABRICS) {
3108 		ctrl->icdoff = le16_to_cpu(id->icdoff);
3109 		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
3110 		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
3111 		ctrl->maxcmd = le16_to_cpu(id->maxcmd);
3112 
3113 		/*
3114 		 * In fabrics we need to verify that the cntlid matches the one
3115 		 * established by the admin connect.
3116 		 */
3117 		if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
3118 			dev_err(ctrl->device,
3119 				"Mismatching cntlid: Connect %u vs Identify "
3120 				"%u, rejecting\n",
3121 				ctrl->cntlid, le16_to_cpu(id->cntlid));
3122 			ret = -EINVAL;
3123 			goto out_free;
3124 		}
3125 
3126 		if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
3127 			dev_err(ctrl->device,
3128 				"keep-alive support is mandatory for fabrics\n");
3129 			ret = -EINVAL;
3130 			goto out_free;
3131 		}
3132 	} else {
3133 		ctrl->hmpre = le32_to_cpu(id->hmpre);
3134 		ctrl->hmmin = le32_to_cpu(id->hmmin);
3135 		ctrl->hmminds = le32_to_cpu(id->hmminds);
3136 		ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
3137 	}
3138 
3139 	ret = nvme_mpath_init_identify(ctrl, id);
3140 	if (ret < 0)
3141 		goto out_free;
3142 
3143 	if (ctrl->apst_enabled && !prev_apst_enabled)
3144 		dev_pm_qos_expose_latency_tolerance(ctrl->device);
3145 	else if (!ctrl->apst_enabled && prev_apst_enabled)
3146 		dev_pm_qos_hide_latency_tolerance(ctrl->device);
3147 
3148 out_free:
3149 	kfree(id);
3150 	return ret;
3151 }
3152 
3153 /*
3154  * Initialize the cached copies of the Identify data and various controller
3155  * registers in our nvme_ctrl structure.  This should be called as soon as
3156  * the admin queue is fully up and running.
3157  */
3158 int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended)
3159 {
3160 	int ret;
3161 
3162 	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
3163 	if (ret) {
3164 		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
3165 		return ret;
3166 	}
3167 
3168 	ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
3169 
3170 	if (ctrl->vs >= NVME_VS(1, 1, 0))
3171 		ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
3172 
3173 	ret = nvme_init_identify(ctrl);
3174 	if (ret)
3175 		return ret;
3176 
3177 	ret = nvme_configure_apst(ctrl);
3178 	if (ret < 0)
3179 		return ret;
3180 
3181 	ret = nvme_configure_timestamp(ctrl);
3182 	if (ret < 0)
3183 		return ret;
3184 
3185 	ret = nvme_configure_host_options(ctrl);
3186 	if (ret < 0)
3187 		return ret;
3188 
3189 	nvme_configure_opal(ctrl, was_suspended);
3190 
3191 	if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
3192 		/*
3193 		 * Do not return errors unless we are in a controller reset;
3194 		 * the controller works perfectly fine without hwmon.
3195 		 */
3196 		ret = nvme_hwmon_init(ctrl);
3197 		if (ret == -EINTR)
3198 			return ret;
3199 	}
3200 
3201 	clear_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags);
3202 	ctrl->identified = true;
3203 
3204 	return 0;
3205 }
3206 EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish);
3207 
3208 static int nvme_dev_open(struct inode *inode, struct file *file)
3209 {
3210 	struct nvme_ctrl *ctrl =
3211 		container_of(inode->i_cdev, struct nvme_ctrl, cdev);
3212 
3213 	switch (nvme_ctrl_state(ctrl)) {
3214 	case NVME_CTRL_LIVE:
3215 		break;
3216 	default:
3217 		return -EWOULDBLOCK;
3218 	}
3219 
3220 	nvme_get_ctrl(ctrl);
3221 	if (!try_module_get(ctrl->ops->module)) {
3222 		nvme_put_ctrl(ctrl);
3223 		return -EINVAL;
3224 	}
3225 
3226 	file->private_data = ctrl;
3227 	return 0;
3228 }
3229 
3230 static int nvme_dev_release(struct inode *inode, struct file *file)
3231 {
3232 	struct nvme_ctrl *ctrl =
3233 		container_of(inode->i_cdev, struct nvme_ctrl, cdev);
3234 
3235 	module_put(ctrl->ops->module);
3236 	nvme_put_ctrl(ctrl);
3237 	return 0;
3238 }
3239 
3240 static const struct file_operations nvme_dev_fops = {
3241 	.owner		= THIS_MODULE,
3242 	.open		= nvme_dev_open,
3243 	.release	= nvme_dev_release,
3244 	.unlocked_ioctl	= nvme_dev_ioctl,
3245 	.compat_ioctl	= compat_ptr_ioctl,
3246 	.uring_cmd	= nvme_dev_uring_cmd,
3247 };
3248 
3249 static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl,
3250 		unsigned nsid)
3251 {
3252 	struct nvme_ns_head *h;
3253 
3254 	lockdep_assert_held(&ctrl->subsys->lock);
3255 
3256 	list_for_each_entry(h, &ctrl->subsys->nsheads, entry) {
3257 		/*
3258 		 * Private namespaces can share NSIDs under some conditions.
3259 		 * In that case we can't use the same ns_head for namespaces
3260 		 * with the same NSID.
3261 		 */
3262 		if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h))
3263 			continue;
3264 		if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
3265 			return h;
3266 	}
3267 
3268 	return NULL;
3269 }
3270 
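/*
 * Return -EINVAL if any ns_head in the subsystem already uses one of the
 * given UUID, NGUID or EUI64 identifiers.
 */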
3271 static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys,
3272 		struct nvme_ns_ids *ids)
3273 {
3274 	bool has_uuid = !uuid_is_null(&ids->uuid);
3275 	bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid));
3276 	bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
3277 	struct nvme_ns_head *h;
3278 
3279 	lockdep_assert_held(&subsys->lock);
3280 
3281 	list_for_each_entry(h, &subsys->nsheads, entry) {
3282 		if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid))
3283 			return -EINVAL;
3284 		if (has_nguid &&
3285 		    memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0)
3286 			return -EINVAL;
3287 		if (has_eui64 &&
3288 		    memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0)
3289 			return -EINVAL;
3290 	}
3291 
3292 	return 0;
3293 }
3294 
3295 static void nvme_cdev_rel(struct device *dev)
3296 {
3297 	ida_free(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
3298 }
3299 
3300 void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device)
3301 {
3302 	cdev_device_del(cdev, cdev_device);
3303 	put_device(cdev_device);
3304 }
3305 
3306 int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
3307 		const struct file_operations *fops, struct module *owner)
3308 {
3309 	int minor, ret;
3310 
3311 	minor = ida_alloc(&nvme_ns_chr_minor_ida, GFP_KERNEL);
3312 	if (minor < 0)
3313 		return minor;
3314 	cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
3315 	cdev_device->class = nvme_ns_chr_class;
3316 	cdev_device->release = nvme_cdev_rel;
3317 	device_initialize(cdev_device);
3318 	cdev_init(cdev, fops);
3319 	cdev->owner = owner;
3320 	ret = cdev_device_add(cdev, cdev_device);
3321 	if (ret)
3322 		put_device(cdev_device);
3323 
3324 	return ret;
3325 }
3326 
3327 static int nvme_ns_chr_open(struct inode *inode, struct file *file)
3328 {
3329 	return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev));
3330 }
3331 
3332 static int nvme_ns_chr_release(struct inode *inode, struct file *file)
3333 {
3334 	nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev));
3335 	return 0;
3336 }
3337 
3338 static const struct file_operations nvme_ns_chr_fops = {
3339 	.owner		= THIS_MODULE,
3340 	.open		= nvme_ns_chr_open,
3341 	.release	= nvme_ns_chr_release,
3342 	.unlocked_ioctl	= nvme_ns_chr_ioctl,
3343 	.compat_ioctl	= compat_ptr_ioctl,
3344 	.uring_cmd	= nvme_ns_chr_uring_cmd,
3345 	.uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll,
3346 };
3347 
3348 static int nvme_add_ns_cdev(struct nvme_ns *ns)
3349 {
3350 	int ret;
3351 
3352 	ns->cdev_device.parent = ns->ctrl->device;
3353 	ret = dev_set_name(&ns->cdev_device, "ng%dn%d",
3354 			   ns->ctrl->instance, ns->head->instance);
3355 	if (ret)
3356 		return ret;
3357 
3358 	return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
3359 			     ns->ctrl->ops->module);
3360 }
3361 
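/*
 * Allocate and initialize a new ns_head for the namespace described by
 * @info, including its command effects log for non-NVM command sets, and
 * add it to the subsystem's list of namespace heads.
 */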
3362 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
3363 		struct nvme_ns_info *info)
3364 {
3365 	struct nvme_ns_head *head;
3366 	size_t size = sizeof(*head);
3367 	int ret = -ENOMEM;
3368 
3369 #ifdef CONFIG_NVME_MULTIPATH
3370 	size += num_possible_nodes() * sizeof(struct nvme_ns *);
3371 #endif
3372 
3373 	head = kzalloc(size, GFP_KERNEL);
3374 	if (!head)
3375 		goto out;
3376 	ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL);
3377 	if (ret < 0)
3378 		goto out_free_head;
3379 	head->instance = ret;
3380 	INIT_LIST_HEAD(&head->list);
3381 	ret = init_srcu_struct(&head->srcu);
3382 	if (ret)
3383 		goto out_ida_remove;
3384 	head->subsys = ctrl->subsys;
3385 	head->ns_id = info->nsid;
3386 	head->ids = info->ids;
3387 	head->shared = info->is_shared;
3388 	kref_init(&head->ref);
3389 
3390 	if (head->ids.csi) {
3391 		ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
3392 		if (ret)
3393 			goto out_cleanup_srcu;
3394 	} else
3395 		head->effects = ctrl->effects;
3396 
3397 	ret = nvme_mpath_alloc_disk(ctrl, head);
3398 	if (ret)
3399 		goto out_cleanup_srcu;
3400 
3401 	list_add_tail(&head->entry, &ctrl->subsys->nsheads);
3402 
3403 	kref_get(&ctrl->subsys->ref);
3404 
3405 	return head;
3406 out_cleanup_srcu:
3407 	cleanup_srcu_struct(&head->srcu);
3408 out_ida_remove:
3409 	ida_free(&ctrl->subsys->ns_ida, head->instance);
3410 out_free_head:
3411 	kfree(head);
3412 out:
3413 	if (ret > 0)
3414 		ret = blk_status_to_errno(nvme_error_status(ret));
3415 	return ERR_PTR(ret);
3416 }
3417 
3418 static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this,
3419 		struct nvme_ns_ids *ids)
3420 {
3421 	struct nvme_subsystem *s;
3422 	int ret = 0;
3423 
3424 	/*
3425 	 * Note that this check is racy as we try to avoid holding the global
3426 	 * lock over the whole ns_head creation.  But it is only intended as
3427 	 * a sanity check anyway.
3428 	 */
3429 	mutex_lock(&nvme_subsystems_lock);
3430 	list_for_each_entry(s, &nvme_subsystems, entry) {
3431 		if (s == this)
3432 			continue;
3433 		mutex_lock(&s->lock);
3434 		ret = nvme_subsys_check_duplicate_ids(s, ids);
3435 		mutex_unlock(&s->lock);
3436 		if (ret)
3437 			break;
3438 	}
3439 	mutex_unlock(&nvme_subsystems_lock);
3440 
3441 	return ret;
3442 }
3443 
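/*
 * Attach the namespace to an existing ns_head with matching identifiers,
 * or allocate a new one, after checking for duplicate identifiers both
 * across subsystems and within this subsystem.
 */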
3444 static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
3445 {
3446 	struct nvme_ctrl *ctrl = ns->ctrl;
3447 	struct nvme_ns_head *head = NULL;
3448 	int ret;
3449 
3450 	ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids);
3451 	if (ret) {
3452 		/*
3453 		 * We've found two different namespaces on two different
3454 		 * subsystems that report the same ID.  This is pretty nasty
3455 		 * for anything that actually requires unique device
3456 		 * identification.  In the kernel we need this for multipathing,
3457 		 * and in user space the /dev/disk/by-id/ links rely on it.
3458 		 *
3459 		 * If the device also claims to be multi-path capable, back off
3460 		 * here now and refuse to probe the second device, as this is a
3461 		 * recipe for data corruption.  If not, this is probably a cheap
3462 		 * consumer device on the PCIe bus, so let the user proceed and
3463 		 * use the shiny toy, but warn that with a changing probing order
3464 		 * (which, due to our async probing, could just be one device
3465 		 * taking longer to start up) the other device could show up at
3466 		 * any time.
3467 		 */
3468 		nvme_print_device_info(ctrl);
3469 		if ((ns->ctrl->ops->flags & NVME_F_FABRICS) || /* !PCIe */
3470 		    ((ns->ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) &&
3471 		     info->is_shared)) {
3472 			dev_err(ctrl->device,
3473 				"ignoring nsid %d because of duplicate IDs\n",
3474 				info->nsid);
3475 			return ret;
3476 		}
3477 
3478 		dev_err(ctrl->device,
3479 			"clearing duplicate IDs for nsid %d\n", info->nsid);
3480 		dev_err(ctrl->device,
3481 			"use of /dev/disk/by-id/ may cause data corruption\n");
3482 		memset(&info->ids.nguid, 0, sizeof(info->ids.nguid));
3483 		memset(&info->ids.uuid, 0, sizeof(info->ids.uuid));
3484 		memset(&info->ids.eui64, 0, sizeof(info->ids.eui64));
3485 		ctrl->quirks |= NVME_QUIRK_BOGUS_NID;
3486 	}
3487 
3488 	mutex_lock(&ctrl->subsys->lock);
3489 	head = nvme_find_ns_head(ctrl, info->nsid);
3490 	if (!head) {
3491 		ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids);
3492 		if (ret) {
3493 			dev_err(ctrl->device,
3494 				"duplicate IDs in subsystem for nsid %d\n",
3495 				info->nsid);
3496 			goto out_unlock;
3497 		}
3498 		head = nvme_alloc_ns_head(ctrl, info);
3499 		if (IS_ERR(head)) {
3500 			ret = PTR_ERR(head);
3501 			goto out_unlock;
3502 		}
3503 	} else {
3504 		ret = -EINVAL;
3505 		if (!info->is_shared || !head->shared) {
3506 			dev_err(ctrl->device,
3507 				"Duplicate unshared namespace %d\n",
3508 				info->nsid);
3509 			goto out_put_ns_head;
3510 		}
3511 		if (!nvme_ns_ids_equal(&head->ids, &info->ids)) {
3512 			dev_err(ctrl->device,
3513 				"IDs don't match for shared namespace %d\n",
3514 					info->nsid);
3515 			goto out_put_ns_head;
3516 		}
3517 
3518 		if (!multipath) {
3519 			dev_warn(ctrl->device,
3520 				"Found shared namespace %d, but multipathing not supported.\n",
3521 				info->nsid);
3522 			dev_warn_once(ctrl->device,
3523 				"Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0.\n");
3524 		}
3525 	}
3526 
3527 	list_add_tail_rcu(&ns->siblings, &head->list);
3528 	ns->head = head;
3529 	mutex_unlock(&ctrl->subsys->lock);
3530 	return 0;
3531 
3532 out_put_ns_head:
3533 	nvme_put_ns_head(head);
3534 out_unlock:
3535 	mutex_unlock(&ctrl->subsys->lock);
3536 	return ret;
3537 }
3538 
3539 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3540 {
3541 	struct nvme_ns *ns, *ret = NULL;
3542 
3543 	down_read(&ctrl->namespaces_rwsem);
3544 	list_for_each_entry(ns, &ctrl->namespaces, list) {
3545 		if (ns->head->ns_id == nsid) {
3546 			if (!nvme_get_ns(ns))
3547 				continue;
3548 			ret = ns;
3549 			break;
3550 		}
3551 		if (ns->head->ns_id > nsid)
3552 			break;
3553 	}
3554 	up_read(&ctrl->namespaces_rwsem);
3555 	return ret;
3556 }
3557 EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
3558 
3559 /*
3560  * Add the namespace to the controller list while keeping the list ordered.
3561  */
3562 static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
3563 {
3564 	struct nvme_ns *tmp;
3565 
3566 	list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
3567 		if (tmp->head->ns_id < ns->head->ns_id) {
3568 			list_add(&ns->list, &tmp->list);
3569 			return;
3570 		}
3571 	}
3572 	list_add(&ns->list, &ns->ctrl->namespaces);
3573 }
3574 
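/*
 * Allocate an nvme_ns and its gendisk for the namespace described by
 * @info, wire it up to an ns_head, and register the block device (plus a
 * character device when multipath is not handling it).
 */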
3575 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
3576 {
3577 	struct nvme_ns *ns;
3578 	struct gendisk *disk;
3579 	int node = ctrl->numa_node;
3580 
3581 	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
3582 	if (!ns)
3583 		return;
3584 
3585 	disk = blk_mq_alloc_disk(ctrl->tagset, ns);
3586 	if (IS_ERR(disk))
3587 		goto out_free_ns;
3588 	disk->fops = &nvme_bdev_ops;
3589 	disk->private_data = ns;
3590 
3591 	ns->disk = disk;
3592 	ns->queue = disk->queue;
3593 
3594 	if (ctrl->opts && ctrl->opts->data_digest)
3595 		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
3596 
3597 	blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
3598 	if (ctrl->ops->supports_pci_p2pdma &&
3599 	    ctrl->ops->supports_pci_p2pdma(ctrl))
3600 		blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
3601 
3602 	ns->ctrl = ctrl;
3603 	kref_init(&ns->kref);
3604 
3605 	if (nvme_init_ns_head(ns, info))
3606 		goto out_cleanup_disk;
3607 
3608 	/*
3609 	 * If multipathing is enabled, the device names for all disks, not just
3610 	 * those that represent shared namespaces, need to be based on the
3611 	 * subsystem instance.  Using the controller instance for private
3612 	 * namespaces could lead to naming collisions between shared and private
3613 	 * namespaces if they don't use a common numbering scheme.
3614 	 *
3615 	 * If multipathing is not enabled, disk names must use the controller
3616 	 * instance as shared namespaces will show up as multiple block
3617 	 * devices.
3618 	 */
3619 	if (nvme_ns_head_multipath(ns->head)) {
3620 		sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
3621 			ctrl->instance, ns->head->instance);
3622 		disk->flags |= GENHD_FL_HIDDEN;
3623 	} else if (multipath) {
3624 		sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance,
3625 			ns->head->instance);
3626 	} else {
3627 		sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
3628 			ns->head->instance);
3629 	}
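	/*
	 * Naming example (instance numbers are hypothetical): with
	 * CONFIG_NVME_MULTIPATH and a shared namespace, the per-path disk is
	 * hidden and named e.g. "nvme0c1n2" (subsystem 0, controller 1, ns
	 * head 2) while the multipath device visible to userspace is
	 * "nvme0n2"; without multipathing the same namespace appears once per
	 * controller as e.g. "nvme1n2".
	 */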
3630 
3631 	if (nvme_update_ns_info(ns, info))
3632 		goto out_unlink_ns;
3633 
3634 	down_write(&ctrl->namespaces_rwsem);
3635 	/*
3636 	 * Ensure that no namespaces are added to the ctrl list after the queues
3637 	 * are frozen, thereby avoiding a deadlock between scan and reset.
3638 	 */
3639 	if (test_bit(NVME_CTRL_FROZEN, &ctrl->flags)) {
3640 		up_write(&ctrl->namespaces_rwsem);
3641 		goto out_unlink_ns;
3642 	}
3643 	nvme_ns_add_to_ctrl_list(ns);
3644 	up_write(&ctrl->namespaces_rwsem);
3645 	nvme_get_ctrl(ctrl);
3646 
3647 	if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups))
3648 		goto out_cleanup_ns_from_list;
3649 
3650 	if (!nvme_ns_head_multipath(ns->head))
3651 		nvme_add_ns_cdev(ns);
3652 
3653 	nvme_mpath_add_disk(ns, info->anagrpid);
3654 	nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
3655 
3656 	return;
3657 
3658  out_cleanup_ns_from_list:
3659 	nvme_put_ctrl(ctrl);
3660 	down_write(&ctrl->namespaces_rwsem);
3661 	list_del_init(&ns->list);
3662 	up_write(&ctrl->namespaces_rwsem);
3663  out_unlink_ns:
3664 	mutex_lock(&ctrl->subsys->lock);
3665 	list_del_rcu(&ns->siblings);
3666 	if (list_empty(&ns->head->list))
3667 		list_del_init(&ns->head->entry);
3668 	mutex_unlock(&ctrl->subsys->lock);
3669 	nvme_put_ns_head(ns->head);
3670  out_cleanup_disk:
3671 	put_disk(disk);
3672  out_free_ns:
3673 	kfree(ns);
3674 }
3675 
3676 static void nvme_ns_remove(struct nvme_ns *ns)
3677 {
3678 	bool last_path = false;
3679 
3680 	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
3681 		return;
3682 
3683 	clear_bit(NVME_NS_READY, &ns->flags);
3684 	set_capacity(ns->disk, 0);
3685 	nvme_fault_inject_fini(&ns->fault_inject);
3686 
3687 	/*
3688 	 * Ensure that !NVME_NS_READY is seen by other threads to prevent
3689 	 * this ns going back into current_path.
3690 	 */
3691 	synchronize_srcu(&ns->head->srcu);
3692 
3693 	/* wait for concurrent submissions */
3694 	if (nvme_mpath_clear_current_path(ns))
3695 		synchronize_srcu(&ns->head->srcu);
3696 
3697 	mutex_lock(&ns->ctrl->subsys->lock);
3698 	list_del_rcu(&ns->siblings);
3699 	if (list_empty(&ns->head->list)) {
3700 		list_del_init(&ns->head->entry);
3701 		last_path = true;
3702 	}
3703 	mutex_unlock(&ns->ctrl->subsys->lock);
3704 
3705 	/* guarantee not available in head->list */
3706 	synchronize_srcu(&ns->head->srcu);
3707 
3708 	if (!nvme_ns_head_multipath(ns->head))
3709 		nvme_cdev_del(&ns->cdev, &ns->cdev_device);
3710 	del_gendisk(ns->disk);
3711 
3712 	down_write(&ns->ctrl->namespaces_rwsem);
3713 	list_del_init(&ns->list);
3714 	up_write(&ns->ctrl->namespaces_rwsem);
3715 
3716 	if (last_path)
3717 		nvme_mpath_shutdown_disk(ns->head);
3718 	nvme_put_ns(ns);
3719 }
3720 
3721 static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
3722 {
3723 	struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid);
3724 
3725 	if (ns) {
3726 		nvme_ns_remove(ns);
3727 		nvme_put_ns(ns);
3728 	}
3729 }
3730 
3731 static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info)
3732 {
3733 	int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
3734 
3735 	if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) {
3736 		dev_err(ns->ctrl->device,
3737 			"identifiers changed for nsid %d\n", ns->head->ns_id);
3738 		goto out;
3739 	}
3740 
3741 	ret = nvme_update_ns_info(ns, info);
3742 out:
3743 	/*
3744 	 * Only remove the namespace if we got a fatal error back from the
3745 	 * device, otherwise ignore the error and just move on.
3746 	 *
3747 	 * TODO: we should probably schedule a delayed retry here.
3748 	 */
3749 	if (ret > 0 && (ret & NVME_SC_DNR))
3750 		nvme_ns_remove(ns);
3751 }
3752 
3753 static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3754 {
3755 	struct nvme_ns_info info = { .nsid = nsid };
3756 	struct nvme_ns *ns;
3757 	int ret;
3758 
3759 	if (nvme_identify_ns_descs(ctrl, &info))
3760 		return;
3761 
3762 	if (info.ids.csi != NVME_CSI_NVM && !nvme_multi_css(ctrl)) {
3763 		dev_warn(ctrl->device,
3764 			"command set not reported for nsid: %d\n", nsid);
3765 		return;
3766 	}
3767 
3768 	/*
3769 	 * If available, try to use the Command Set Independent Identify Namespace
3770 	 * data structure to find all the generic information that is needed to
3771 	 * set up a namespace.  If not, fall back to the legacy version.
3772 	 */
3773 	if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) ||
3774 	    (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS))
3775 		ret = nvme_ns_info_from_id_cs_indep(ctrl, &info);
3776 	else
3777 		ret = nvme_ns_info_from_identify(ctrl, &info);
3778 
3779 	if (info.is_removed)
3780 		nvme_ns_remove_by_nsid(ctrl, nsid);
3781 
3782 	/*
3783 	 * Ignore the namespace if it is not ready. We will get an AEN once it
3784 	 * becomes ready and restart the scan.
3785 	 */
3786 	if (ret || !info.is_ready)
3787 		return;
3788 
3789 	ns = nvme_find_get_ns(ctrl, nsid);
3790 	if (ns) {
3791 		nvme_validate_ns(ns, &info);
3792 		nvme_put_ns(ns);
3793 	} else {
3794 		nvme_alloc_ns(ctrl, &info);
3795 	}
3796 }
3797 
3798 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
3799 					unsigned nsid)
3800 {
3801 	struct nvme_ns *ns, *next;
3802 	LIST_HEAD(rm_list);
3803 
3804 	down_write(&ctrl->namespaces_rwsem);
3805 	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
3806 		if (ns->head->ns_id > nsid)
3807 			list_move_tail(&ns->list, &rm_list);
3808 	}
3809 	up_write(&ctrl->namespaces_rwsem);
3810 
3811 	list_for_each_entry_safe(ns, next, &rm_list, list)
3812 		nvme_ns_remove(ns);
3813 
3814 }
3815 
3816 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
3817 {
3818 	const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32);
3819 	__le32 *ns_list;
3820 	u32 prev = 0;
3821 	int ret = 0, i;
3822 
3823 	ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
3824 	if (!ns_list)
3825 		return -ENOMEM;
3826 
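	/*
	 * Sketch of the scan loop below (NSIDs are hypothetical): each
	 * Identify Active Namespace ID List command returns up to nr_entries
	 * NSIDs greater than 'prev' in increasing order.  If a page contains
	 * {1, 2, 5}, nsids 1, 2 and 5 are scanned and the gap (3 and 4) is
	 * removed via nvme_ns_remove_by_nsid(); a zero entry terminates the
	 * list, after which any stale namespaces above the last seen NSID are
	 * dropped by nvme_remove_invalid_namespaces().
	 */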
3827 	for (;;) {
3828 		struct nvme_command cmd = {
3829 			.identify.opcode	= nvme_admin_identify,
3830 			.identify.cns		= NVME_ID_CNS_NS_ACTIVE_LIST,
3831 			.identify.nsid		= cpu_to_le32(prev),
3832 		};
3833 
3834 		ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list,
3835 					    NVME_IDENTIFY_DATA_SIZE);
3836 		if (ret) {
3837 			dev_warn(ctrl->device,
3838 				"Identify NS List failed (status=0x%x)\n", ret);
3839 			goto free;
3840 		}
3841 
3842 		for (i = 0; i < nr_entries; i++) {
3843 			u32 nsid = le32_to_cpu(ns_list[i]);
3844 
3845 			if (!nsid)	/* end of the list? */
3846 				goto out;
3847 			nvme_scan_ns(ctrl, nsid);
3848 			while (++prev < nsid)
3849 				nvme_ns_remove_by_nsid(ctrl, prev);
3850 		}
3851 	}
3852  out:
3853 	nvme_remove_invalid_namespaces(ctrl, prev);
3854  free:
3855 	kfree(ns_list);
3856 	return ret;
3857 }
3858 
3859 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
3860 {
3861 	struct nvme_id_ctrl *id;
3862 	u32 nn, i;
3863 
3864 	if (nvme_identify_ctrl(ctrl, &id))
3865 		return;
3866 	nn = le32_to_cpu(id->nn);
3867 	kfree(id);
3868 
3869 	for (i = 1; i <= nn; i++)
3870 		nvme_scan_ns(ctrl, i);
3871 
3872 	nvme_remove_invalid_namespaces(ctrl, nn);
3873 }
3874 
3875 static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
3876 {
3877 	size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
3878 	__le32 *log;
3879 	int error;
3880 
3881 	log = kzalloc(log_size, GFP_KERNEL);
3882 	if (!log)
3883 		return;
3884 
3885 	/*
3886 	 * We need to read the log to clear the AEN, but we don't want to rely
3887 	 * on it for the changed namespace information as userspace could have
3888 	 * raced with us in reading the log page, which could cause us to miss
3889 	 * updates.
3890 	 */
3891 	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0,
3892 			NVME_CSI_NVM, log, log_size, 0);
3893 	if (error)
3894 		dev_warn(ctrl->device,
3895 			"reading changed ns log failed: %d\n", error);
3896 
3897 	kfree(log);
3898 }
3899 
3900 static void nvme_scan_work(struct work_struct *work)
3901 {
3902 	struct nvme_ctrl *ctrl =
3903 		container_of(work, struct nvme_ctrl, scan_work);
3904 	int ret;
3905 
3906 	/* No tagset on a live ctrl means IO queues could not be created */
3907 	if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE || !ctrl->tagset)
3908 		return;
3909 
3910 	/*
3911 	 * Identify controller limits can change at controller reset due to a new
3912 	 * firmware download, and even though that is not common we cannot ignore
3913 	 * such a scenario.  The controller's non-MDTS limits are reported in units
3914 	 * of logical blocks, which depend on the format of the attached
3915 	 * namespaces.  Hence re-read the limits at the time of ns allocation.
3916 	 */
3917 	ret = nvme_init_non_mdts_limits(ctrl);
3918 	if (ret < 0) {
3919 		dev_warn(ctrl->device,
3920 			"reading non-mdts-limits failed: %d\n", ret);
3921 		return;
3922 	}
3923 
3924 	if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
3925 		dev_info(ctrl->device, "rescanning namespaces.\n");
3926 		nvme_clear_changed_ns_log(ctrl);
3927 	}
3928 
3929 	mutex_lock(&ctrl->scan_lock);
3930 	if (nvme_ctrl_limited_cns(ctrl)) {
3931 		nvme_scan_ns_sequential(ctrl);
3932 	} else {
3933 		/*
3934 		 * Fall back to sequential scan if DNR is set to handle broken
3935 		 * devices which should support Identify NS List (as per the VS
3936 		 * they report) but don't actually support it.
3937 		 */
3938 		ret = nvme_scan_ns_list(ctrl);
3939 		if (ret > 0 && ret & NVME_SC_DNR)
3940 			nvme_scan_ns_sequential(ctrl);
3941 	}
3942 	mutex_unlock(&ctrl->scan_lock);
3943 }
3944 
3945 /*
3946  * This function iterates the namespace list unlocked to allow recovery from
3947  * controller failure. It is up to the caller to ensure the namespace list is
3948  * not modified by scan work while this function is executing.
3949  */
3950 void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
3951 {
3952 	struct nvme_ns *ns, *next;
3953 	LIST_HEAD(ns_list);
3954 
3955 	/*
3956 	 * make sure to requeue I/O to all namespaces, as such I/O
3957 	 * might result from the scan itself and must complete
3958 	 * for scan_work to make progress
3959 	 */
3960 	nvme_mpath_clear_ctrl_paths(ctrl);
3961 
3962 	/*
3963 	 * Unquiesce io queues so any pending IO won't hang, especially
3964 	 * those submitted from scan work
3965 	 */
3966 	nvme_unquiesce_io_queues(ctrl);
3967 
3968 	/* prevent racing with ns scanning */
3969 	flush_work(&ctrl->scan_work);
3970 
3971 	/*
3972 	 * The dead state indicates the controller was not gracefully
3973 	 * disconnected. In that case, we won't be able to flush any data while
3974 	 * removing the namespaces' disks; fail all the queues now to avoid
3975 	 * potentially having to clean up the failed sync later.
3976 	 */
3977 	if (nvme_ctrl_state(ctrl) == NVME_CTRL_DEAD)
3978 		nvme_mark_namespaces_dead(ctrl);
3979 
3980 	/* this is a no-op when called from the controller reset handler */
3981 	nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);
3982 
3983 	down_write(&ctrl->namespaces_rwsem);
3984 	list_splice_init(&ctrl->namespaces, &ns_list);
3985 	up_write(&ctrl->namespaces_rwsem);
3986 
3987 	list_for_each_entry_safe(ns, next, &ns_list, list)
3988 		nvme_ns_remove(ns);
3989 }
3990 EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
3991 
3992 static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env)
3993 {
3994 	const struct nvme_ctrl *ctrl =
3995 		container_of(dev, struct nvme_ctrl, ctrl_device);
3996 	struct nvmf_ctrl_options *opts = ctrl->opts;
3997 	int ret;
3998 
3999 	ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
4000 	if (ret)
4001 		return ret;
4002 
4003 	if (opts) {
4004 		ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
4005 		if (ret)
4006 			return ret;
4007 
4008 		ret = add_uevent_var(env, "NVME_TRSVCID=%s",
4009 				opts->trsvcid ?: "none");
4010 		if (ret)
4011 			return ret;
4012 
4013 		ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
4014 				opts->host_traddr ?: "none");
4015 		if (ret)
4016 			return ret;
4017 
4018 		ret = add_uevent_var(env, "NVME_HOST_IFACE=%s",
4019 				opts->host_iface ?: "none");
4020 	}
4021 	return ret;
4022 }
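/*
 * Sketch of the resulting uevent environment for a hypothetical fabrics
 * controller (values are examples only):
 *
 *	NVME_TRTYPE=tcp
 *	NVME_TRADDR=192.168.1.10
 *	NVME_TRSVCID=4420
 *	NVME_HOST_TRADDR=none
 *	NVME_HOST_IFACE=none
 *
 * Controllers without nvmf_ctrl_options (e.g. PCIe) only add NVME_TRTYPE.
 */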
4023 
4024 static void nvme_change_uevent(struct nvme_ctrl *ctrl, char *envdata)
4025 {
4026 	char *envp[2] = { envdata, NULL };
4027 
4028 	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
4029 }
4030 
4031 static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
4032 {
4033 	char *envp[2] = { NULL, NULL };
4034 	u32 aen_result = ctrl->aen_result;
4035 
4036 	ctrl->aen_result = 0;
4037 	if (!aen_result)
4038 		return;
4039 
4040 	envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
4041 	if (!envp[0])
4042 		return;
4043 	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
4044 	kfree(envp[0]);
4045 }
4046 
4047 static void nvme_async_event_work(struct work_struct *work)
4048 {
4049 	struct nvme_ctrl *ctrl =
4050 		container_of(work, struct nvme_ctrl, async_event_work);
4051 
4052 	nvme_aen_uevent(ctrl);
4053 
4054 	/*
4055 	 * The transport drivers must guarantee AER submission here is safe by
4056 	 * flushing ctrl async_event_work after changing the controller state
4057 	 * from LIVE and before freeing the admin queue.
4058 	 */
4059 	if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE)
4060 		ctrl->ops->submit_async_event(ctrl);
4061 }
4062 
4063 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
4064 {
4066 	u32 csts;
4067 
4068 	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
4069 		return false;
4070 
4071 	if (csts == ~0)
4072 		return false;
4073 
4074 	return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
4075 }
4076 
4077 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
4078 {
4079 	struct nvme_fw_slot_info_log *log;
4080 
4081 	log = kmalloc(sizeof(*log), GFP_KERNEL);
4082 	if (!log)
4083 		return;
4084 
4085 	if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM,
4086 			log, sizeof(*log), 0))
4087 		dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
4088 	kfree(log);
4089 }
4090 
4091 static void nvme_fw_act_work(struct work_struct *work)
4092 {
4093 	struct nvme_ctrl *ctrl = container_of(work,
4094 				struct nvme_ctrl, fw_act_work);
4095 	unsigned long fw_act_timeout;
4096 
4097 	nvme_auth_stop(ctrl);
4098 
4099 	if (ctrl->mtfa)
4100 		fw_act_timeout = jiffies +
4101 				msecs_to_jiffies(ctrl->mtfa * 100);
4102 	else
4103 		fw_act_timeout = jiffies +
4104 				msecs_to_jiffies(admin_timeout * 1000);
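	/*
	 * Worked example (MTFA value is hypothetical): MTFA is reported by
	 * the controller in units of 100 ms, so ctrl->mtfa == 50 yields a
	 * 5000 ms activation window; a controller reporting 0 falls back to
	 * admin_timeout seconds.
	 */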
4105 
4106 	nvme_quiesce_io_queues(ctrl);
4107 	while (nvme_ctrl_pp_status(ctrl)) {
4108 		if (time_after(jiffies, fw_act_timeout)) {
4109 			dev_warn(ctrl->device,
4110 				"Fw activation timeout, reset controller\n");
4111 			nvme_try_sched_reset(ctrl);
4112 			return;
4113 		}
4114 		msleep(100);
4115 	}
4116 
4117 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
4118 		return;
4119 
4120 	nvme_unquiesce_io_queues(ctrl);
4121 	/* read FW slot information to clear the AER */
4122 	nvme_get_fw_slot_info(ctrl);
4123 
4124 	queue_work(nvme_wq, &ctrl->async_event_work);
4125 }
4126 
4127 static u32 nvme_aer_type(u32 result)
4128 {
4129 	return result & 0x7;
4130 }
4131 
4132 static u32 nvme_aer_subtype(u32 result)
4133 {
4134 	return (result & 0xff00) >> 8;
4135 }
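/*
 * Layout of the AER completion result dword decoded by the helpers above,
 * per the NVMe specification: bits 2:0 carry the Asynchronous Event Type,
 * bits 15:8 the Asynchronous Event Information (the subtype used below), and
 * bits 23:16 the Log Page Identifier associated with the event.
 */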
4136 
4137 static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
4138 {
4139 	u32 aer_notice_type = nvme_aer_subtype(result);
4140 	bool requeue = true;
4141 
4142 	switch (aer_notice_type) {
4143 	case NVME_AER_NOTICE_NS_CHANGED:
4144 		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
4145 		nvme_queue_scan(ctrl);
4146 		break;
4147 	case NVME_AER_NOTICE_FW_ACT_STARTING:
4148 		/*
4149 		 * We are (ab)using the RESETTING state to prevent subsequent
4150 		 * recovery actions from interfering with the controller's
4151 		 * firmware activation.
4152 		 */
4153 		if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
4154 			requeue = false;
4155 			queue_work(nvme_wq, &ctrl->fw_act_work);
4156 		}
4157 		break;
4158 #ifdef CONFIG_NVME_MULTIPATH
4159 	case NVME_AER_NOTICE_ANA:
4160 		if (!ctrl->ana_log_buf)
4161 			break;
4162 		queue_work(nvme_wq, &ctrl->ana_work);
4163 		break;
4164 #endif
4165 	case NVME_AER_NOTICE_DISC_CHANGED:
4166 		ctrl->aen_result = result;
4167 		break;
4168 	default:
4169 		dev_warn(ctrl->device, "async event result %08x\n", result);
4170 	}
4171 	return requeue;
4172 }
4173 
4174 static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
4175 {
4176 	dev_warn(ctrl->device, "resetting controller due to AER\n");
4177 	nvme_reset_ctrl(ctrl);
4178 }
4179 
4180 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
4181 		volatile union nvme_result *res)
4182 {
4183 	u32 result = le32_to_cpu(res->u32);
4184 	u32 aer_type = nvme_aer_type(result);
4185 	u32 aer_subtype = nvme_aer_subtype(result);
4186 	bool requeue = true;
4187 
4188 	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
4189 		return;
4190 
4191 	trace_nvme_async_event(ctrl, result);
4192 	switch (aer_type) {
4193 	case NVME_AER_NOTICE:
4194 		requeue = nvme_handle_aen_notice(ctrl, result);
4195 		break;
4196 	case NVME_AER_ERROR:
4197 		/*
4198 		 * For a persistent internal error, don't run async_event_work
4199 		 * to submit a new AER. The controller reset will do it.
4200 		 */
4201 		if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) {
4202 			nvme_handle_aer_persistent_error(ctrl);
4203 			return;
4204 		}
4205 		fallthrough;
4206 	case NVME_AER_SMART:
4207 	case NVME_AER_CSS:
4208 	case NVME_AER_VS:
4209 		ctrl->aen_result = result;
4210 		break;
4211 	default:
4212 		break;
4213 	}
4214 
4215 	if (requeue)
4216 		queue_work(nvme_wq, &ctrl->async_event_work);
4217 }
4218 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
4219 
4220 int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
4221 		const struct blk_mq_ops *ops, unsigned int cmd_size)
4222 {
4223 	int ret;
4224 
4225 	memset(set, 0, sizeof(*set));
4226 	set->ops = ops;
4227 	set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
4228 	if (ctrl->ops->flags & NVME_F_FABRICS)
4229 		/* Reserved for fabric connect and keep alive */
4230 		set->reserved_tags = 2;
4231 	set->numa_node = ctrl->numa_node;
4232 	set->flags = BLK_MQ_F_NO_SCHED;
4233 	if (ctrl->ops->flags & NVME_F_BLOCKING)
4234 		set->flags |= BLK_MQ_F_BLOCKING;
4235 	set->cmd_size = cmd_size;
4236 	set->driver_data = ctrl;
4237 	set->nr_hw_queues = 1;
4238 	set->timeout = NVME_ADMIN_TIMEOUT;
4239 	ret = blk_mq_alloc_tag_set(set);
4240 	if (ret)
4241 		return ret;
4242 
4243 	ctrl->admin_q = blk_mq_init_queue(set);
4244 	if (IS_ERR(ctrl->admin_q)) {
4245 		ret = PTR_ERR(ctrl->admin_q);
4246 		goto out_free_tagset;
4247 	}
4248 
4249 	if (ctrl->ops->flags & NVME_F_FABRICS) {
4250 		ctrl->fabrics_q = blk_mq_init_queue(set);
4251 		if (IS_ERR(ctrl->fabrics_q)) {
4252 			ret = PTR_ERR(ctrl->fabrics_q);
4253 			goto out_cleanup_admin_q;
4254 		}
4255 	}
4256 
4257 	ctrl->admin_tagset = set;
4258 	return 0;
4259 
4260 out_cleanup_admin_q:
4261 	blk_mq_destroy_queue(ctrl->admin_q);
4262 	blk_put_queue(ctrl->admin_q);
4263 out_free_tagset:
4264 	blk_mq_free_tag_set(set);
4265 	ctrl->admin_q = NULL;
4266 	ctrl->fabrics_q = NULL;
4267 	return ret;
4268 }
4269 EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
4270 
4271 void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
4272 {
4273 	blk_mq_destroy_queue(ctrl->admin_q);
4274 	blk_put_queue(ctrl->admin_q);
4275 	if (ctrl->ops->flags & NVME_F_FABRICS) {
4276 		blk_mq_destroy_queue(ctrl->fabrics_q);
4277 		blk_put_queue(ctrl->fabrics_q);
4278 	}
4279 	blk_mq_free_tag_set(ctrl->admin_tagset);
4280 }
4281 EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);
4282 
4283 int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
4284 		const struct blk_mq_ops *ops, unsigned int nr_maps,
4285 		unsigned int cmd_size)
4286 {
4287 	int ret;
4288 
4289 	memset(set, 0, sizeof(*set));
4290 	set->ops = ops;
4291 	set->queue_depth = min_t(unsigned, ctrl->sqsize, BLK_MQ_MAX_DEPTH - 1);
4292 	/*
4293 	 * Some Apple controllers require tags to be unique across admin and
4294 	 * the (only) I/O queue, so reserve the first 32 tags of the I/O queue.
4295 	 */
4296 	if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS)
4297 		set->reserved_tags = NVME_AQ_DEPTH;
4298 	else if (ctrl->ops->flags & NVME_F_FABRICS)
4299 		/* Reserved for fabric connect */
4300 		set->reserved_tags = 1;
4301 	set->numa_node = ctrl->numa_node;
4302 	set->flags = BLK_MQ_F_SHOULD_MERGE;
4303 	if (ctrl->ops->flags & NVME_F_BLOCKING)
4304 		set->flags |= BLK_MQ_F_BLOCKING;
4305 	set->cmd_size = cmd_size;
4306 	set->driver_data = ctrl;
4307 	set->nr_hw_queues = ctrl->queue_count - 1;
4308 	set->timeout = NVME_IO_TIMEOUT;
4309 	set->nr_maps = nr_maps;
4310 	ret = blk_mq_alloc_tag_set(set);
4311 	if (ret)
4312 		return ret;
4313 
4314 	if (ctrl->ops->flags & NVME_F_FABRICS) {
4315 		ctrl->connect_q = blk_mq_init_queue(set);
4316 		if (IS_ERR(ctrl->connect_q)) {
4317 			ret = PTR_ERR(ctrl->connect_q);
4318 			goto out_free_tag_set;
4319 		}
4320 		blk_queue_flag_set(QUEUE_FLAG_SKIP_TAGSET_QUIESCE,
4321 				   ctrl->connect_q);
4322 	}
4323 
4324 	ctrl->tagset = set;
4325 	return 0;
4326 
4327 out_free_tag_set:
4328 	blk_mq_free_tag_set(set);
4329 	ctrl->connect_q = NULL;
4330 	return ret;
4331 }
4332 EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);
4333 
4334 void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl)
4335 {
4336 	if (ctrl->ops->flags & NVME_F_FABRICS) {
4337 		blk_mq_destroy_queue(ctrl->connect_q);
4338 		blk_put_queue(ctrl->connect_q);
4339 	}
4340 	blk_mq_free_tag_set(ctrl->tagset);
4341 }
4342 EXPORT_SYMBOL_GPL(nvme_remove_io_tag_set);
4343 
4344 void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
4345 {
4346 	nvme_mpath_stop(ctrl);
4347 	nvme_auth_stop(ctrl);
4348 	nvme_stop_keep_alive(ctrl);
4349 	nvme_stop_failfast_work(ctrl);
4350 	flush_work(&ctrl->async_event_work);
4351 	cancel_work_sync(&ctrl->fw_act_work);
4352 	if (ctrl->ops->stop_ctrl)
4353 		ctrl->ops->stop_ctrl(ctrl);
4354 }
4355 EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
4356 
4357 void nvme_start_ctrl(struct nvme_ctrl *ctrl)
4358 {
4359 	nvme_start_keep_alive(ctrl);
4360 
4361 	nvme_enable_aen(ctrl);
4362 
4363 	/*
4364 	 * Persistent discovery controllers need to send an indication to userspace
4365 	 * to re-read the discovery log page and learn about possible changes that
4366 	 * were missed.  We identify persistent discovery controllers by checking
4367 	 * that they have started once before, and hence are reconnecting.
4368 	 */
4369 	if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
4370 	    nvme_discovery_ctrl(ctrl))
4371 		nvme_change_uevent(ctrl, "NVME_EVENT=rediscover");
4372 
4373 	if (ctrl->queue_count > 1) {
4374 		nvme_queue_scan(ctrl);
4375 		nvme_unquiesce_io_queues(ctrl);
4376 		nvme_mpath_update(ctrl);
4377 	}
4378 
4379 	nvme_change_uevent(ctrl, "NVME_EVENT=connected");
4380 	set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags);
4381 }
4382 EXPORT_SYMBOL_GPL(nvme_start_ctrl);
4383 
4384 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
4385 {
4386 	nvme_hwmon_exit(ctrl);
4387 	nvme_fault_inject_fini(&ctrl->fault_inject);
4388 	dev_pm_qos_hide_latency_tolerance(ctrl->device);
4389 	cdev_device_del(&ctrl->cdev, ctrl->device);
4390 	nvme_put_ctrl(ctrl);
4391 }
4392 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
4393 
4394 static void nvme_free_cels(struct nvme_ctrl *ctrl)
4395 {
4396 	struct nvme_effects_log	*cel;
4397 	unsigned long i;
4398 
4399 	xa_for_each(&ctrl->cels, i, cel) {
4400 		xa_erase(&ctrl->cels, i);
4401 		kfree(cel);
4402 	}
4403 
4404 	xa_destroy(&ctrl->cels);
4405 }
4406 
4407 static void nvme_free_ctrl(struct device *dev)
4408 {
4409 	struct nvme_ctrl *ctrl =
4410 		container_of(dev, struct nvme_ctrl, ctrl_device);
4411 	struct nvme_subsystem *subsys = ctrl->subsys;
4412 
4413 	if (!subsys || ctrl->instance != subsys->instance)
4414 		ida_free(&nvme_instance_ida, ctrl->instance);
4415 
4416 	nvme_free_cels(ctrl);
4417 	nvme_mpath_uninit(ctrl);
4418 	nvme_auth_stop(ctrl);
4419 	nvme_auth_free(ctrl);
4420 	__free_page(ctrl->discard_page);
4421 	free_opal_dev(ctrl->opal_dev);
4422 
4423 	if (subsys) {
4424 		mutex_lock(&nvme_subsystems_lock);
4425 		list_del(&ctrl->subsys_entry);
4426 		sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
4427 		mutex_unlock(&nvme_subsystems_lock);
4428 	}
4429 
4430 	ctrl->ops->free_ctrl(ctrl);
4431 
4432 	if (subsys)
4433 		nvme_put_subsystem(subsys);
4434 }
4435 
4436 /*
4437  * Initialize an NVMe controller structure.  This needs to be called during
4438  * the earliest initialization so that we have the initialized structure
4439  * around during probing.
4440  */
4441 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
4442 		const struct nvme_ctrl_ops *ops, unsigned long quirks)
4443 {
4444 	int ret;
4445 
4446 	WRITE_ONCE(ctrl->state, NVME_CTRL_NEW);
4447 	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
4448 	spin_lock_init(&ctrl->lock);
4449 	mutex_init(&ctrl->scan_lock);
4450 	INIT_LIST_HEAD(&ctrl->namespaces);
4451 	xa_init(&ctrl->cels);
4452 	init_rwsem(&ctrl->namespaces_rwsem);
4453 	ctrl->dev = dev;
4454 	ctrl->ops = ops;
4455 	ctrl->quirks = quirks;
4456 	ctrl->numa_node = NUMA_NO_NODE;
4457 	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
4458 	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
4459 	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
4460 	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
4461 	init_waitqueue_head(&ctrl->state_wq);
4462 
4463 	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
4464 	INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
4465 	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
4466 	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
4467 
4468 	BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
4469 			PAGE_SIZE);
4470 	ctrl->discard_page = alloc_page(GFP_KERNEL);
4471 	if (!ctrl->discard_page) {
4472 		ret = -ENOMEM;
4473 		goto out;
4474 	}
4475 
4476 	ret = ida_alloc(&nvme_instance_ida, GFP_KERNEL);
4477 	if (ret < 0)
4478 		goto out;
4479 	ctrl->instance = ret;
4480 
4481 	device_initialize(&ctrl->ctrl_device);
4482 	ctrl->device = &ctrl->ctrl_device;
4483 	ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
4484 			ctrl->instance);
4485 	ctrl->device->class = nvme_class;
4486 	ctrl->device->parent = ctrl->dev;
4487 	if (ops->dev_attr_groups)
4488 		ctrl->device->groups = ops->dev_attr_groups;
4489 	else
4490 		ctrl->device->groups = nvme_dev_attr_groups;
4491 	ctrl->device->release = nvme_free_ctrl;
4492 	dev_set_drvdata(ctrl->device, ctrl);
4493 	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
4494 	if (ret)
4495 		goto out_release_instance;
4496 
4497 	nvme_get_ctrl(ctrl);
4498 	cdev_init(&ctrl->cdev, &nvme_dev_fops);
4499 	ctrl->cdev.owner = ops->module;
4500 	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
4501 	if (ret)
4502 		goto out_free_name;
4503 
4504 	/*
4505 	 * Initialize latency tolerance controls.  The sysfs files won't
4506 	 * be visible to userspace unless the device actually supports APST.
4507 	 */
4508 	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
4509 	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
4510 		min(default_ps_max_latency_us, (unsigned long)S32_MAX));
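	/*
	 * Per-device tuning sketch (controller name hypothetical, path from
	 * the PM QoS sysfs interface): userspace can override the module-wide
	 * default by writing to
	 *
	 *	/sys/class/nvme/nvme0/power/pm_qos_latency_tolerance_us
	 *
	 * e.g. a latency in microseconds, or "any" to remove the constraint.
	 */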
4511 
4512 	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
4513 	nvme_mpath_init_ctrl(ctrl);
4514 	ret = nvme_auth_init_ctrl(ctrl);
4515 	if (ret)
4516 		goto out_free_cdev;
4517 
4518 	return 0;
4519 out_free_cdev:
4520 	nvme_fault_inject_fini(&ctrl->fault_inject);
4521 	dev_pm_qos_hide_latency_tolerance(ctrl->device);
4522 	cdev_device_del(&ctrl->cdev, ctrl->device);
4523 out_free_name:
4524 	nvme_put_ctrl(ctrl);
4525 	kfree_const(ctrl->device->kobj.name);
4526 out_release_instance:
4527 	ida_free(&nvme_instance_ida, ctrl->instance);
4528 out:
4529 	if (ctrl->discard_page)
4530 		__free_page(ctrl->discard_page);
4531 	return ret;
4532 }
4533 EXPORT_SYMBOL_GPL(nvme_init_ctrl);
4534 
4535 /* let I/O to all namespaces fail in preparation for surprise removal */
4536 void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl)
4537 {
4538 	struct nvme_ns *ns;
4539 
4540 	down_read(&ctrl->namespaces_rwsem);
4541 	list_for_each_entry(ns, &ctrl->namespaces, list)
4542 		blk_mark_disk_dead(ns->disk);
4543 	up_read(&ctrl->namespaces_rwsem);
4544 }
4545 EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead);
4546 
4547 void nvme_unfreeze(struct nvme_ctrl *ctrl)
4548 {
4549 	struct nvme_ns *ns;
4550 
4551 	down_read(&ctrl->namespaces_rwsem);
4552 	list_for_each_entry(ns, &ctrl->namespaces, list)
4553 		blk_mq_unfreeze_queue(ns->queue);
4554 	up_read(&ctrl->namespaces_rwsem);
4555 	clear_bit(NVME_CTRL_FROZEN, &ctrl->flags);
4556 }
4557 EXPORT_SYMBOL_GPL(nvme_unfreeze);
4558 
4559 int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
4560 {
4561 	struct nvme_ns *ns;
4562 
4563 	down_read(&ctrl->namespaces_rwsem);
4564 	list_for_each_entry(ns, &ctrl->namespaces, list) {
4565 		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
4566 		if (timeout <= 0)
4567 			break;
4568 	}
4569 	up_read(&ctrl->namespaces_rwsem);
4570 	return timeout;
4571 }
4572 EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
4573 
4574 void nvme_wait_freeze(struct nvme_ctrl *ctrl)
4575 {
4576 	struct nvme_ns *ns;
4577 
4578 	down_read(&ctrl->namespaces_rwsem);
4579 	list_for_each_entry(ns, &ctrl->namespaces, list)
4580 		blk_mq_freeze_queue_wait(ns->queue);
4581 	up_read(&ctrl->namespaces_rwsem);
4582 }
4583 EXPORT_SYMBOL_GPL(nvme_wait_freeze);
4584 
4585 void nvme_start_freeze(struct nvme_ctrl *ctrl)
4586 {
4587 	struct nvme_ns *ns;
4588 
4589 	set_bit(NVME_CTRL_FROZEN, &ctrl->flags);
4590 	down_read(&ctrl->namespaces_rwsem);
4591 	list_for_each_entry(ns, &ctrl->namespaces, list)
4592 		blk_freeze_queue_start(ns->queue);
4593 	up_read(&ctrl->namespaces_rwsem);
4594 }
4595 EXPORT_SYMBOL_GPL(nvme_start_freeze);
4596 
4597 void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl)
4598 {
4599 	if (!ctrl->tagset)
4600 		return;
4601 	if (!test_and_set_bit(NVME_CTRL_STOPPED, &ctrl->flags))
4602 		blk_mq_quiesce_tagset(ctrl->tagset);
4603 	else
4604 		blk_mq_wait_quiesce_done(ctrl->tagset);
4605 }
4606 EXPORT_SYMBOL_GPL(nvme_quiesce_io_queues);
4607 
4608 void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl)
4609 {
4610 	if (!ctrl->tagset)
4611 		return;
4612 	if (test_and_clear_bit(NVME_CTRL_STOPPED, &ctrl->flags))
4613 		blk_mq_unquiesce_tagset(ctrl->tagset);
4614 }
4615 EXPORT_SYMBOL_GPL(nvme_unquiesce_io_queues);
4616 
4617 void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl)
4618 {
4619 	if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
4620 		blk_mq_quiesce_queue(ctrl->admin_q);
4621 	else
4622 		blk_mq_wait_quiesce_done(ctrl->admin_q->tag_set);
4623 }
4624 EXPORT_SYMBOL_GPL(nvme_quiesce_admin_queue);
4625 
4626 void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl)
4627 {
4628 	if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
4629 		blk_mq_unquiesce_queue(ctrl->admin_q);
4630 }
4631 EXPORT_SYMBOL_GPL(nvme_unquiesce_admin_queue);
4632 
4633 void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
4634 {
4635 	struct nvme_ns *ns;
4636 
4637 	down_read(&ctrl->namespaces_rwsem);
4638 	list_for_each_entry(ns, &ctrl->namespaces, list)
4639 		blk_sync_queue(ns->queue);
4640 	up_read(&ctrl->namespaces_rwsem);
4641 }
4642 EXPORT_SYMBOL_GPL(nvme_sync_io_queues);
4643 
4644 void nvme_sync_queues(struct nvme_ctrl *ctrl)
4645 {
4646 	nvme_sync_io_queues(ctrl);
4647 	if (ctrl->admin_q)
4648 		blk_sync_queue(ctrl->admin_q);
4649 }
4650 EXPORT_SYMBOL_GPL(nvme_sync_queues);
4651 
4652 struct nvme_ctrl *nvme_ctrl_from_file(struct file *file)
4653 {
4654 	if (file->f_op != &nvme_dev_fops)
4655 		return NULL;
4656 	return file->private_data;
4657 }
4658 EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU);
4659 
4660 /*
4661  * Check we didn't inadvertently grow the command structure sizes:
4662  */
4663 static inline void _nvme_check_size(void)
4664 {
4665 	BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
4666 	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
4667 	BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
4668 	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
4669 	BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
4670 	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
4671 	BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
4672 	BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
4673 	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
4674 	BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
4675 	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
4676 	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
4677 	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
4678 	BUILD_BUG_ON(sizeof(struct nvme_id_ns_cs_indep) !=
4679 			NVME_IDENTIFY_DATA_SIZE);
4680 	BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE);
4681 	BUILD_BUG_ON(sizeof(struct nvme_id_ns_nvm) != NVME_IDENTIFY_DATA_SIZE);
4682 	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE);
4683 	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE);
4684 	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
4685 	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
4686 	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
4687 	BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
4688 	BUILD_BUG_ON(sizeof(struct nvme_feat_host_behavior) != 512);
4689 }
4690 
4691 
4692 static int __init nvme_core_init(void)
4693 {
4694 	int result = -ENOMEM;
4695 
4696 	_nvme_check_size();
4697 
4698 	nvme_wq = alloc_workqueue("nvme-wq",
4699 			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4700 	if (!nvme_wq)
4701 		goto out;
4702 
4703 	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
4704 			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4705 	if (!nvme_reset_wq)
4706 		goto destroy_wq;
4707 
4708 	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
4709 			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4710 	if (!nvme_delete_wq)
4711 		goto destroy_reset_wq;
4712 
4713 	result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0,
4714 			NVME_MINORS, "nvme");
4715 	if (result < 0)
4716 		goto destroy_delete_wq;
4717 
4718 	nvme_class = class_create("nvme");
4719 	if (IS_ERR(nvme_class)) {
4720 		result = PTR_ERR(nvme_class);
4721 		goto unregister_chrdev;
4722 	}
4723 	nvme_class->dev_uevent = nvme_class_uevent;
4724 
4725 	nvme_subsys_class = class_create("nvme-subsystem");
4726 	if (IS_ERR(nvme_subsys_class)) {
4727 		result = PTR_ERR(nvme_subsys_class);
4728 		goto destroy_class;
4729 	}
4730 
4731 	result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS,
4732 				     "nvme-generic");
4733 	if (result < 0)
4734 		goto destroy_subsys_class;
4735 
4736 	nvme_ns_chr_class = class_create("nvme-generic");
4737 	if (IS_ERR(nvme_ns_chr_class)) {
4738 		result = PTR_ERR(nvme_ns_chr_class);
4739 		goto unregister_generic_ns;
4740 	}
4741 
4742 	result = nvme_init_auth();
4743 	if (result)
4744 		goto destroy_ns_chr;
4745 	return 0;
4746 
4747 destroy_ns_chr:
4748 	class_destroy(nvme_ns_chr_class);
4749 unregister_generic_ns:
4750 	unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
4751 destroy_subsys_class:
4752 	class_destroy(nvme_subsys_class);
4753 destroy_class:
4754 	class_destroy(nvme_class);
4755 unregister_chrdev:
4756 	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
4757 destroy_delete_wq:
4758 	destroy_workqueue(nvme_delete_wq);
4759 destroy_reset_wq:
4760 	destroy_workqueue(nvme_reset_wq);
4761 destroy_wq:
4762 	destroy_workqueue(nvme_wq);
4763 out:
4764 	return result;
4765 }
4766 
4767 static void __exit nvme_core_exit(void)
4768 {
4769 	nvme_exit_auth();
4770 	class_destroy(nvme_ns_chr_class);
4771 	class_destroy(nvme_subsys_class);
4772 	class_destroy(nvme_class);
4773 	unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
4774 	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
4775 	destroy_workqueue(nvme_delete_wq);
4776 	destroy_workqueue(nvme_reset_wq);
4777 	destroy_workqueue(nvme_wq);
4778 	ida_destroy(&nvme_ns_chr_minor_ida);
4779 	ida_destroy(&nvme_instance_ida);
4780 }
4781 
4782 MODULE_LICENSE("GPL");
4783 MODULE_VERSION("1.0");
4784 module_init(nvme_core_init);
4785 module_exit(nvme_core_exit);
4786