1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVM Express device driver
4  * Copyright (c) 2011-2014, Intel Corporation.
5  */
6 
7 #include <linux/blkdev.h>
8 #include <linux/blk-mq.h>
9 #include <linux/blk-integrity.h>
10 #include <linux/compat.h>
11 #include <linux/delay.h>
12 #include <linux/errno.h>
13 #include <linux/hdreg.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/backing-dev.h>
17 #include <linux/slab.h>
18 #include <linux/types.h>
19 #include <linux/pr.h>
20 #include <linux/ptrace.h>
21 #include <linux/nvme_ioctl.h>
22 #include <linux/pm_qos.h>
23 #include <asm/unaligned.h>
24 
25 #include "nvme.h"
26 #include "fabrics.h"
27 
28 #define CREATE_TRACE_POINTS
29 #include "trace.h"
30 
31 #define NVME_MINORS		(1U << MINORBITS)
32 
33 unsigned int admin_timeout = 60;
34 module_param(admin_timeout, uint, 0644);
35 MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
36 EXPORT_SYMBOL_GPL(admin_timeout);
37 
38 unsigned int nvme_io_timeout = 30;
39 module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
40 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
41 EXPORT_SYMBOL_GPL(nvme_io_timeout);
42 
43 static unsigned char shutdown_timeout = 5;
44 module_param(shutdown_timeout, byte, 0644);
45 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
46 
47 static u8 nvme_max_retries = 5;
48 module_param_named(max_retries, nvme_max_retries, byte, 0644);
49 MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
50 
51 static unsigned long default_ps_max_latency_us = 100000;
52 module_param(default_ps_max_latency_us, ulong, 0644);
53 MODULE_PARM_DESC(default_ps_max_latency_us,
54 		 "max power saving latency for new devices; use PM QOS to change per device");
55 
56 static bool force_apst;
57 module_param(force_apst, bool, 0644);
58 MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");
59 
60 static unsigned long apst_primary_timeout_ms = 100;
61 module_param(apst_primary_timeout_ms, ulong, 0644);
62 MODULE_PARM_DESC(apst_primary_timeout_ms,
63 	"primary APST timeout in ms");
64 
65 static unsigned long apst_secondary_timeout_ms = 2000;
66 module_param(apst_secondary_timeout_ms, ulong, 0644);
67 MODULE_PARM_DESC(apst_secondary_timeout_ms,
68 	"secondary APST timeout in ms");
69 
70 static unsigned long apst_primary_latency_tol_us = 15000;
71 module_param(apst_primary_latency_tol_us, ulong, 0644);
72 MODULE_PARM_DESC(apst_primary_latency_tol_us,
73 	"primary APST latency tolerance in us");
74 
75 static unsigned long apst_secondary_latency_tol_us = 100000;
76 module_param(apst_secondary_latency_tol_us, ulong, 0644);
77 MODULE_PARM_DESC(apst_secondary_latency_tol_us,
78 	"secondary APST latency tolerance in us");
79 
80 static bool streams;
81 module_param(streams, bool, 0644);
82 MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
83 
84 /*
85  * nvme_wq - hosts nvme related works that are not reset or delete
86  * nvme_reset_wq - hosts nvme reset works
87  * nvme_delete_wq - hosts nvme delete works
88  *
89  * nvme_wq hosts works such as scan, AEN handling, FW activation,
90  * keep-alive, periodic reconnects etc. nvme_reset_wq runs reset
91  * works, which also flush works hosted on nvme_wq for serialization
92  * purposes. nvme_delete_wq hosts controller deletion works, which
93  * flush reset works for serialization.
94  */
95 struct workqueue_struct *nvme_wq;
96 EXPORT_SYMBOL_GPL(nvme_wq);
97 
98 struct workqueue_struct *nvme_reset_wq;
99 EXPORT_SYMBOL_GPL(nvme_reset_wq);
100 
101 struct workqueue_struct *nvme_delete_wq;
102 EXPORT_SYMBOL_GPL(nvme_delete_wq);
103 
104 static LIST_HEAD(nvme_subsystems);
105 static DEFINE_MUTEX(nvme_subsystems_lock);
106 
107 static DEFINE_IDA(nvme_instance_ida);
108 static dev_t nvme_ctrl_base_chr_devt;
109 static struct class *nvme_class;
110 static struct class *nvme_subsys_class;
111 
112 static DEFINE_IDA(nvme_ns_chr_minor_ida);
113 static dev_t nvme_ns_chr_devt;
114 static struct class *nvme_ns_chr_class;
115 
116 static void nvme_put_subsystem(struct nvme_subsystem *subsys);
117 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
118 					   unsigned nsid);
119 static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
120 				   struct nvme_command *cmd);
121 
122 void nvme_queue_scan(struct nvme_ctrl *ctrl)
123 {
124 	/*
125 	 * Only queue new scan work when the admin and IO queues are both alive
126 	 */
127 	if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
128 		queue_work(nvme_wq, &ctrl->scan_work);
129 }
130 
131 /*
132  * Use this function to proceed with scheduling reset_work for a controller
133  * that had previously been set to the resetting state. This is intended for
134  * code paths that can't be interrupted by other reset attempts. A hot removal
135  * may prevent this from succeeding.
136  */
137 int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
138 {
139 	if (ctrl->state != NVME_CTRL_RESETTING)
140 		return -EBUSY;
141 	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
142 		return -EBUSY;
143 	return 0;
144 }
145 EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
146 
147 static void nvme_failfast_work(struct work_struct *work)
148 {
149 	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
150 			struct nvme_ctrl, failfast_work);
151 
152 	if (ctrl->state != NVME_CTRL_CONNECTING)
153 		return;
154 
155 	set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
156 	dev_info(ctrl->device, "failfast expired\n");
157 	nvme_kick_requeue_lists(ctrl);
158 }
159 
160 static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
161 {
162 	if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
163 		return;
164 
165 	schedule_delayed_work(&ctrl->failfast_work,
166 			      ctrl->opts->fast_io_fail_tmo * HZ);
167 }
168 
169 static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
170 {
171 	if (!ctrl->opts)
172 		return;
173 
174 	cancel_delayed_work_sync(&ctrl->failfast_work);
175 	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
176 }
177 
179 int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
180 {
181 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
182 		return -EBUSY;
183 	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
184 		return -EBUSY;
185 	return 0;
186 }
187 EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
188 
189 int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
190 {
191 	int ret;
192 
193 	ret = nvme_reset_ctrl(ctrl);
194 	if (!ret) {
195 		flush_work(&ctrl->reset_work);
196 		if (ctrl->state != NVME_CTRL_LIVE)
197 			ret = -ENETRESET;
198 	}
199 
200 	return ret;
201 }
202 
203 static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
204 {
205 	dev_info(ctrl->device,
206 		 "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl));
207 
208 	flush_work(&ctrl->reset_work);
209 	nvme_stop_ctrl(ctrl);
210 	nvme_remove_namespaces(ctrl);
211 	ctrl->ops->delete_ctrl(ctrl);
212 	nvme_uninit_ctrl(ctrl);
213 }
214 
215 static void nvme_delete_ctrl_work(struct work_struct *work)
216 {
217 	struct nvme_ctrl *ctrl =
218 		container_of(work, struct nvme_ctrl, delete_work);
219 
220 	nvme_do_delete_ctrl(ctrl);
221 }
222 
223 int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
224 {
225 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
226 		return -EBUSY;
227 	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
228 		return -EBUSY;
229 	return 0;
230 }
231 EXPORT_SYMBOL_GPL(nvme_delete_ctrl);
232 
233 static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
234 {
235 	/*
236 	 * Keep a reference until nvme_do_delete_ctrl() completes,
237 	 * since ->delete_ctrl can free the controller.
238 	 */
239 	nvme_get_ctrl(ctrl);
240 	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
241 		nvme_do_delete_ctrl(ctrl);
242 	nvme_put_ctrl(ctrl);
243 }
244 
245 static blk_status_t nvme_error_status(u16 status)
246 {
247 	switch (status & 0x7ff) {
248 	case NVME_SC_SUCCESS:
249 		return BLK_STS_OK;
250 	case NVME_SC_CAP_EXCEEDED:
251 		return BLK_STS_NOSPC;
252 	case NVME_SC_LBA_RANGE:
253 	case NVME_SC_CMD_INTERRUPTED:
254 	case NVME_SC_NS_NOT_READY:
255 		return BLK_STS_TARGET;
256 	case NVME_SC_BAD_ATTRIBUTES:
257 	case NVME_SC_ONCS_NOT_SUPPORTED:
258 	case NVME_SC_INVALID_OPCODE:
259 	case NVME_SC_INVALID_FIELD:
260 	case NVME_SC_INVALID_NS:
261 		return BLK_STS_NOTSUPP;
262 	case NVME_SC_WRITE_FAULT:
263 	case NVME_SC_READ_ERROR:
264 	case NVME_SC_UNWRITTEN_BLOCK:
265 	case NVME_SC_ACCESS_DENIED:
266 	case NVME_SC_READ_ONLY:
267 	case NVME_SC_COMPARE_FAILED:
268 		return BLK_STS_MEDIUM;
269 	case NVME_SC_GUARD_CHECK:
270 	case NVME_SC_APPTAG_CHECK:
271 	case NVME_SC_REFTAG_CHECK:
272 	case NVME_SC_INVALID_PI:
273 		return BLK_STS_PROTECTION;
274 	case NVME_SC_RESERVATION_CONFLICT:
275 		return BLK_STS_NEXUS;
276 	case NVME_SC_HOST_PATH_ERROR:
277 		return BLK_STS_TRANSPORT;
278 	case NVME_SC_ZONE_TOO_MANY_ACTIVE:
279 		return BLK_STS_ZONE_ACTIVE_RESOURCE;
280 	case NVME_SC_ZONE_TOO_MANY_OPEN:
281 		return BLK_STS_ZONE_OPEN_RESOURCE;
282 	default:
283 		return BLK_STS_IOERR;
284 	}
285 }
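/*
 * Note: masking with 0x7ff keeps only the Status Code and Status Code
 * Type fields, dropping the CRD, More and DNR bits, which do not
 * affect the block layer mapping above.
 */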
286 
287 static void nvme_retry_req(struct request *req)
288 {
289 	unsigned long delay = 0;
290 	u16 crd;
291 
292 	/* The mask and shift result must be <= 3 */
293 	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
294 	if (crd)
295 		delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;
296 
297 	nvme_req(req)->retries++;
298 	blk_mq_requeue_request(req, false);
299 	blk_mq_delay_kick_requeue_list(req->q, delay);
300 }
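/*
 * Worked example (assuming CRDT values are in units of 100 ms, as
 * reported by Identify Controller): a status word with CRD == 2
 * selects crdt[1]; if the controller reported crdt[1] == 10, the
 * request is requeued with a delay of 10 * 100 == 1000 ms.
 */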
301 
302 enum nvme_disposition {
303 	COMPLETE,
304 	RETRY,
305 	FAILOVER,
306 };
307 
308 static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
309 {
310 	if (likely(nvme_req(req)->status == 0))
311 		return COMPLETE;
312 
313 	if (blk_noretry_request(req) ||
314 	    (nvme_req(req)->status & NVME_SC_DNR) ||
315 	    nvme_req(req)->retries >= nvme_max_retries)
316 		return COMPLETE;
317 
318 	if (req->cmd_flags & REQ_NVME_MPATH) {
319 		if (nvme_is_path_error(nvme_req(req)->status) ||
320 		    blk_queue_dying(req->q))
321 			return FAILOVER;
322 	} else {
323 		if (blk_queue_dying(req->q))
324 			return COMPLETE;
325 	}
326 
327 	return RETRY;
328 }
329 
330 static inline void nvme_end_req_zoned(struct request *req)
331 {
332 	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
333 	    req_op(req) == REQ_OP_ZONE_APPEND)
334 		req->__sector = nvme_lba_to_sect(req->q->queuedata,
335 			le64_to_cpu(nvme_req(req)->result.u64));
336 }
337 
338 static inline void nvme_end_req(struct request *req)
339 {
340 	blk_status_t status = nvme_error_status(nvme_req(req)->status);
341 
342 	nvme_end_req_zoned(req);
343 	nvme_trace_bio_complete(req);
344 	blk_mq_end_request(req, status);
345 }
346 
347 void nvme_complete_rq(struct request *req)
348 {
349 	trace_nvme_complete_rq(req);
350 	nvme_cleanup_cmd(req);
351 
352 	if (nvme_req(req)->ctrl->kas)
353 		nvme_req(req)->ctrl->comp_seen = true;
354 
355 	switch (nvme_decide_disposition(req)) {
356 	case COMPLETE:
357 		nvme_end_req(req);
358 		return;
359 	case RETRY:
360 		nvme_retry_req(req);
361 		return;
362 	case FAILOVER:
363 		nvme_failover_req(req);
364 		return;
365 	}
366 }
367 EXPORT_SYMBOL_GPL(nvme_complete_rq);
368 
369 void nvme_complete_batch_req(struct request *req)
370 {
371 	trace_nvme_complete_rq(req);
372 	nvme_cleanup_cmd(req);
373 	nvme_end_req_zoned(req);
374 }
375 EXPORT_SYMBOL_GPL(nvme_complete_batch_req);
376 
377 /*
378  * Called to unwind from ->queue_rq on a failed command submission so that the
379  * multipathing code gets called to potentially failover to another path.
380  * The caller needs to unwind all transport specific resource allocations and
381  * must propagate the return value.
382  */
383 blk_status_t nvme_host_path_error(struct request *req)
384 {
385 	nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
386 	blk_mq_set_request_complete(req);
387 	nvme_complete_rq(req);
388 	return BLK_STS_OK;
389 }
390 EXPORT_SYMBOL_GPL(nvme_host_path_error);
391 
392 bool nvme_cancel_request(struct request *req, void *data, bool reserved)
393 {
394 	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
395 				"Cancelling I/O %d", req->tag);
396 
397 	/* don't abort an already completed request */
398 	if (blk_mq_request_completed(req))
399 		return true;
400 
401 	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
402 	nvme_req(req)->flags |= NVME_REQ_CANCELLED;
403 	blk_mq_complete_request(req);
404 	return true;
405 }
406 EXPORT_SYMBOL_GPL(nvme_cancel_request);
407 
408 void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
409 {
410 	if (ctrl->tagset) {
411 		blk_mq_tagset_busy_iter(ctrl->tagset,
412 				nvme_cancel_request, ctrl);
413 		blk_mq_tagset_wait_completed_request(ctrl->tagset);
414 	}
415 }
416 EXPORT_SYMBOL_GPL(nvme_cancel_tagset);
417 
418 void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
419 {
420 	if (ctrl->admin_tagset) {
421 		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
422 				nvme_cancel_request, ctrl);
423 		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
424 	}
425 }
426 EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);
427 
428 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
429 		enum nvme_ctrl_state new_state)
430 {
431 	enum nvme_ctrl_state old_state;
432 	unsigned long flags;
433 	bool changed = false;
434 
435 	spin_lock_irqsave(&ctrl->lock, flags);
436 
437 	old_state = ctrl->state;
438 	switch (new_state) {
439 	case NVME_CTRL_LIVE:
440 		switch (old_state) {
441 		case NVME_CTRL_NEW:
442 		case NVME_CTRL_RESETTING:
443 		case NVME_CTRL_CONNECTING:
444 			changed = true;
445 			fallthrough;
446 		default:
447 			break;
448 		}
449 		break;
450 	case NVME_CTRL_RESETTING:
451 		switch (old_state) {
452 		case NVME_CTRL_NEW:
453 		case NVME_CTRL_LIVE:
454 			changed = true;
455 			fallthrough;
456 		default:
457 			break;
458 		}
459 		break;
460 	case NVME_CTRL_CONNECTING:
461 		switch (old_state) {
462 		case NVME_CTRL_NEW:
463 		case NVME_CTRL_RESETTING:
464 			changed = true;
465 			fallthrough;
466 		default:
467 			break;
468 		}
469 		break;
470 	case NVME_CTRL_DELETING:
471 		switch (old_state) {
472 		case NVME_CTRL_LIVE:
473 		case NVME_CTRL_RESETTING:
474 		case NVME_CTRL_CONNECTING:
475 			changed = true;
476 			fallthrough;
477 		default:
478 			break;
479 		}
480 		break;
481 	case NVME_CTRL_DELETING_NOIO:
482 		switch (old_state) {
483 		case NVME_CTRL_DELETING:
484 		case NVME_CTRL_DEAD:
485 			changed = true;
486 			fallthrough;
487 		default:
488 			break;
489 		}
490 		break;
491 	case NVME_CTRL_DEAD:
492 		switch (old_state) {
493 		case NVME_CTRL_DELETING:
494 			changed = true;
495 			fallthrough;
496 		default:
497 			break;
498 		}
499 		break;
500 	default:
501 		break;
502 	}
503 
504 	if (changed) {
505 		ctrl->state = new_state;
506 		wake_up_all(&ctrl->state_wq);
507 	}
508 
509 	spin_unlock_irqrestore(&ctrl->lock, flags);
510 	if (!changed)
511 		return false;
512 
513 	if (ctrl->state == NVME_CTRL_LIVE) {
514 		if (old_state == NVME_CTRL_CONNECTING)
515 			nvme_stop_failfast_work(ctrl);
516 		nvme_kick_requeue_lists(ctrl);
517 	} else if (ctrl->state == NVME_CTRL_CONNECTING &&
518 		old_state == NVME_CTRL_RESETTING) {
519 		nvme_start_failfast_work(ctrl);
520 	}
521 	return changed;
522 }
523 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
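/*
 * Summary of the transitions accepted above (derived from the switch;
 * anything not listed fails with changed == false):
 *
 *	NEW        -> LIVE, RESETTING, CONNECTING
 *	LIVE       -> RESETTING, DELETING
 *	RESETTING  -> LIVE, CONNECTING, DELETING
 *	CONNECTING -> LIVE, DELETING
 *	DELETING   -> DELETING_NOIO, DEAD
 *	DEAD       -> DELETING_NOIO
 */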
524 
525 /*
526  * Returns true for sink states that can't ever transition back to live.
527  */
528 static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
529 {
530 	switch (ctrl->state) {
531 	case NVME_CTRL_NEW:
532 	case NVME_CTRL_LIVE:
533 	case NVME_CTRL_RESETTING:
534 	case NVME_CTRL_CONNECTING:
535 		return false;
536 	case NVME_CTRL_DELETING:
537 	case NVME_CTRL_DELETING_NOIO:
538 	case NVME_CTRL_DEAD:
539 		return true;
540 	default:
541 		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
542 		return true;
543 	}
544 }
545 
546 /*
547  * Waits for the controller state to be resetting, or returns false if it is
548  * not possible to ever transition to that state.
549  */
550 bool nvme_wait_reset(struct nvme_ctrl *ctrl)
551 {
552 	wait_event(ctrl->state_wq,
553 		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
554 		   nvme_state_terminal(ctrl));
555 	return ctrl->state == NVME_CTRL_RESETTING;
556 }
557 EXPORT_SYMBOL_GPL(nvme_wait_reset);
558 
559 static void nvme_free_ns_head(struct kref *ref)
560 {
561 	struct nvme_ns_head *head =
562 		container_of(ref, struct nvme_ns_head, ref);
563 
564 	nvme_mpath_remove_disk(head);
565 	ida_simple_remove(&head->subsys->ns_ida, head->instance);
566 	cleanup_srcu_struct(&head->srcu);
567 	nvme_put_subsystem(head->subsys);
568 	kfree(head);
569 }
570 
571 bool nvme_tryget_ns_head(struct nvme_ns_head *head)
572 {
573 	return kref_get_unless_zero(&head->ref);
574 }
575 
576 void nvme_put_ns_head(struct nvme_ns_head *head)
577 {
578 	kref_put(&head->ref, nvme_free_ns_head);
579 }
580 
581 static void nvme_free_ns(struct kref *kref)
582 {
583 	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
584 
585 	put_disk(ns->disk);
586 	nvme_put_ns_head(ns->head);
587 	nvme_put_ctrl(ns->ctrl);
588 	kfree(ns);
589 }
590 
591 static inline bool nvme_get_ns(struct nvme_ns *ns)
592 {
593 	return kref_get_unless_zero(&ns->kref);
594 }
595 
596 void nvme_put_ns(struct nvme_ns *ns)
597 {
598 	kref_put(&ns->kref, nvme_free_ns);
599 }
600 EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
601 
602 static inline void nvme_clear_nvme_request(struct request *req)
603 {
604 	nvme_req(req)->status = 0;
605 	nvme_req(req)->retries = 0;
606 	nvme_req(req)->flags = 0;
607 	req->rq_flags |= RQF_DONTPREP;
608 }
609 
610 static inline unsigned int nvme_req_op(struct nvme_command *cmd)
611 {
612 	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
613 }
614 
615 static inline void nvme_init_request(struct request *req,
616 		struct nvme_command *cmd)
617 {
618 	if (req->q->queuedata)
619 		req->timeout = NVME_IO_TIMEOUT;
620 	else /* no queuedata implies admin queue */
621 		req->timeout = NVME_ADMIN_TIMEOUT;
622 
623 	/* passthru commands should let the driver set the SGL flags */
624 	cmd->common.flags &= ~NVME_CMD_SGL_ALL;
625 
626 	req->cmd_flags |= REQ_FAILFAST_DRIVER;
627 	if (req->mq_hctx->type == HCTX_TYPE_POLL)
628 		req->cmd_flags |= REQ_POLLED;
629 	nvme_clear_nvme_request(req);
630 	memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
631 }
632 
633 struct request *nvme_alloc_request(struct request_queue *q,
634 		struct nvme_command *cmd, blk_mq_req_flags_t flags)
635 {
636 	struct request *req;
637 
638 	req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
639 	if (!IS_ERR(req))
640 		nvme_init_request(req, cmd);
641 	return req;
642 }
643 EXPORT_SYMBOL_GPL(nvme_alloc_request);
644 
645 static struct request *nvme_alloc_request_qid(struct request_queue *q,
646 		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
647 {
648 	struct request *req;
649 
650 	req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
651 			qid ? qid - 1 : 0);
652 	if (!IS_ERR(req))
653 		nvme_init_request(req, cmd);
654 	return req;
655 }
656 
657 /*
658  * For a command that we are not in a state to send to the device, the default
659  * action is to busy it and retry it after the controller state is recovered.
660  * However, if the controller is deleting, or if anything is marked for
661  * failfast or nvme multipath, it is failed immediately.
662  *
663  * Note: commands used to initialize the controller will be marked for failfast.
664  * Note: nvme cli/ioctl commands are marked for failfast.
665  */
666 blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
667 		struct request *rq)
668 {
669 	if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
670 	    ctrl->state != NVME_CTRL_DELETING &&
671 	    ctrl->state != NVME_CTRL_DEAD &&
672 	    !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
673 	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
674 		return BLK_STS_RESOURCE;
675 	return nvme_host_path_error(rq);
676 }
677 EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
678 
679 bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
680 		bool queue_live)
681 {
682 	struct nvme_request *req = nvme_req(rq);
683 
684 	/*
685 	 * Currently we have a problem sending passthru commands
686 	 * on the admin_q if the controller is not LIVE, because we can't
687 	 * make sure that they go out after the admin connect,
688 	 * controller enable and/or other commands in the initialization
689 	 * sequence. Until the controller is LIVE, fail with
690 	 * BLK_STS_RESOURCE so that they get rescheduled.
691 	 */
692 	if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
693 		return false;
694 
695 	if (ctrl->ops->flags & NVME_F_FABRICS) {
696 		/*
697 		 * Only allow commands on a live queue, except for the connect
698 		 * command, which is required to set the queue live in the
699 		 * appropriate states.
700 		 */
701 		switch (ctrl->state) {
702 		case NVME_CTRL_CONNECTING:
703 			if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
704 			    req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
705 				return true;
706 			break;
707 		default:
708 			break;
709 		case NVME_CTRL_DEAD:
710 			return false;
711 		}
712 	}
713 
714 	return queue_live;
715 }
716 EXPORT_SYMBOL_GPL(__nvme_check_ready);
717 
718 static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
719 {
720 	struct nvme_command c = { };
721 
722 	c.directive.opcode = nvme_admin_directive_send;
723 	c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
724 	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
725 	c.directive.dtype = NVME_DIR_IDENTIFY;
726 	c.directive.tdtype = NVME_DIR_STREAMS;
727 	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;
728 
729 	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
730 }
731 
732 static int nvme_disable_streams(struct nvme_ctrl *ctrl)
733 {
734 	return nvme_toggle_streams(ctrl, false);
735 }
736 
737 static int nvme_enable_streams(struct nvme_ctrl *ctrl)
738 {
739 	return nvme_toggle_streams(ctrl, true);
740 }
741 
742 static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
743 				  struct streams_directive_params *s, u32 nsid)
744 {
745 	struct nvme_command c = { };
746 
747 	memset(s, 0, sizeof(*s));
748 
749 	c.directive.opcode = nvme_admin_directive_recv;
750 	c.directive.nsid = cpu_to_le32(nsid);
751 	c.directive.numd = cpu_to_le32(nvme_bytes_to_numd(sizeof(*s)));
752 	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
753 	c.directive.dtype = NVME_DIR_STREAMS;
754 
755 	return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
756 }
757 
758 static int nvme_configure_directives(struct nvme_ctrl *ctrl)
759 {
760 	struct streams_directive_params s;
761 	int ret;
762 
763 	if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
764 		return 0;
765 	if (!streams)
766 		return 0;
767 
768 	ret = nvme_enable_streams(ctrl);
769 	if (ret)
770 		return ret;
771 
772 	ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
773 	if (ret)
774 		goto out_disable_stream;
775 
776 	ctrl->nssa = le16_to_cpu(s.nssa);
777 	if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
778 		dev_info(ctrl->device, "too few streams (%u) available\n",
779 					ctrl->nssa);
780 		goto out_disable_stream;
781 	}
782 
783 	ctrl->nr_streams = min_t(u16, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
784 	dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
785 	return 0;
786 
787 out_disable_stream:
788 	nvme_disable_streams(ctrl);
789 	return ret;
790 }
791 
792 /*
793  * Check if 'req' has a write hint associated with it. If it does, assign
794  * a valid namespace stream to the write.
795  */
796 static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
797 				     struct request *req, u16 *control,
798 				     u32 *dsmgmt)
799 {
800 	enum rw_hint streamid = req->write_hint;
801 
802 	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
803 		streamid = 0;
804 	else {
805 		streamid--;
806 		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
807 			return;
808 
809 		*control |= NVME_RW_DTYPE_STREAMS;
810 		*dsmgmt |= streamid << 16;
811 	}
812 
813 	if (streamid < ARRAY_SIZE(req->q->write_hints))
814 		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
815 }
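/*
 * Example (following the code above): a request tagged with
 * WRITE_LIFE_SHORT (rw_hint value 2, assuming the standard enum
 * encoding) maps to stream ID 1 after the decrement; DTYPE is set to
 * Streams and the stream ID is placed in the upper 16 bits of dsmgmt,
 * i.e. the DSPEC field of command dword 13.
 */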
816 
817 static inline void nvme_setup_flush(struct nvme_ns *ns,
818 		struct nvme_command *cmnd)
819 {
820 	memset(cmnd, 0, sizeof(*cmnd));
821 	cmnd->common.opcode = nvme_cmd_flush;
822 	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
823 }
824 
825 static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
826 		struct nvme_command *cmnd)
827 {
828 	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
829 	struct nvme_dsm_range *range;
830 	struct bio *bio;
831 
832 	/*
833 	 * Some devices do not consider the DSM 'Number of Ranges' field when
834 	 * determining how much data to DMA. Always allocate memory for the maximum
835 	 * number of segments so the device cannot read beyond the end of the buffer.
836 	 */
837 	static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;
838 
839 	range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
840 	if (!range) {
841 		/*
842 		 * If we fail to allocate our range, fall back to the controller
843 		 * discard page. If that's also busy, it's safe to return
844 		 * busy, as we know we can make progress once that's freed.
845 		 */
846 		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
847 			return BLK_STS_RESOURCE;
848 
849 		range = page_address(ns->ctrl->discard_page);
850 	}
851 
852 	__rq_for_each_bio(bio, req) {
853 		u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
854 		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
855 
856 		if (n < segments) {
857 			range[n].cattr = cpu_to_le32(0);
858 			range[n].nlb = cpu_to_le32(nlb);
859 			range[n].slba = cpu_to_le64(slba);
860 		}
861 		n++;
862 	}
863 
864 	if (WARN_ON_ONCE(n != segments)) {
865 		if (virt_to_page(range) == ns->ctrl->discard_page)
866 			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
867 		else
868 			kfree(range);
869 		return BLK_STS_IOERR;
870 	}
871 
872 	memset(cmnd, 0, sizeof(*cmnd));
873 	cmnd->dsm.opcode = nvme_cmd_dsm;
874 	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
875 	cmnd->dsm.nr = cpu_to_le32(segments - 1);
876 	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
877 
878 	req->special_vec.bv_page = virt_to_page(range);
879 	req->special_vec.bv_offset = offset_in_page(range);
880 	req->special_vec.bv_len = alloc_size;
881 	req->rq_flags |= RQF_SPECIAL_PAYLOAD;
882 
883 	return BLK_STS_OK;
884 }
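/*
 * Example: a discard request spanning two bios yields two populated
 * range entries and cmnd->dsm.nr == cpu_to_le32(1), since the Number
 * of Ranges field is 0's based; the buffer itself is still sized for
 * NVME_DSM_MAX_RANGES entries, as explained above.
 */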
885 
886 static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
887 		struct request *req, struct nvme_command *cmnd)
888 {
889 	memset(cmnd, 0, sizeof(*cmnd));
890 
891 	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
892 		return nvme_setup_discard(ns, req, cmnd);
893 
894 	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
895 	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
896 	cmnd->write_zeroes.slba =
897 		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
898 	cmnd->write_zeroes.length =
899 		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
900 
901 	if (nvme_ns_has_pi(ns)) {
902 		cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT);
903 
904 		switch (ns->pi_type) {
905 		case NVME_NS_DPS_PI_TYPE1:
906 		case NVME_NS_DPS_PI_TYPE2:
907 			cmnd->write_zeroes.reftag =
908 				cpu_to_le32(t10_pi_ref_tag(req));
909 			break;
910 		}
911 	}
912 
913 	return BLK_STS_OK;
914 }
915 
916 static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
917 		struct request *req, struct nvme_command *cmnd,
918 		enum nvme_opcode op)
919 {
920 	struct nvme_ctrl *ctrl = ns->ctrl;
921 	u16 control = 0;
922 	u32 dsmgmt = 0;
923 
924 	if (req->cmd_flags & REQ_FUA)
925 		control |= NVME_RW_FUA;
926 	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
927 		control |= NVME_RW_LR;
928 
929 	if (req->cmd_flags & REQ_RAHEAD)
930 		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
931 
932 	cmnd->rw.opcode = op;
933 	cmnd->rw.flags = 0;
934 	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
935 	cmnd->rw.rsvd2 = 0;
936 	cmnd->rw.metadata = 0;
937 	cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
938 	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
939 	cmnd->rw.reftag = 0;
940 	cmnd->rw.apptag = 0;
941 	cmnd->rw.appmask = 0;
942 
943 	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
944 		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);
945 
946 	if (ns->ms) {
947 		/*
948 		 * If formatted with metadata, the block layer always provides a
949 		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.  Else
950 		 * we enable the PRACT bit for protection information or set the
951 		 * namespace capacity to zero to prevent any I/O.
952 		 */
953 		if (!blk_integrity_rq(req)) {
954 			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
955 				return BLK_STS_NOTSUPP;
956 			control |= NVME_RW_PRINFO_PRACT;
957 		}
958 
959 		switch (ns->pi_type) {
960 		case NVME_NS_DPS_PI_TYPE3:
961 			control |= NVME_RW_PRINFO_PRCHK_GUARD;
962 			break;
963 		case NVME_NS_DPS_PI_TYPE1:
964 		case NVME_NS_DPS_PI_TYPE2:
965 			control |= NVME_RW_PRINFO_PRCHK_GUARD |
966 					NVME_RW_PRINFO_PRCHK_REF;
967 			if (op == nvme_cmd_zone_append)
968 				control |= NVME_RW_APPEND_PIREMAP;
969 			cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
970 			break;
971 		}
972 	}
973 
974 	cmnd->rw.control = cpu_to_le16(control);
975 	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
976 	return 0;
977 }
978 
979 void nvme_cleanup_cmd(struct request *req)
980 {
981 	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
982 		struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
983 
984 		if (req->special_vec.bv_page == ctrl->discard_page)
985 			clear_bit_unlock(0, &ctrl->discard_page_busy);
986 		else
987 			kfree(bvec_virt(&req->special_vec));
988 	}
989 }
990 EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
991 
992 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
993 {
994 	struct nvme_command *cmd = nvme_req(req)->cmd;
995 	blk_status_t ret = BLK_STS_OK;
996 
997 	if (!(req->rq_flags & RQF_DONTPREP))
998 		nvme_clear_nvme_request(req);
999 
1000 	switch (req_op(req)) {
1001 	case REQ_OP_DRV_IN:
1002 	case REQ_OP_DRV_OUT:
1003 		/* these are setup prior to execution in nvme_init_request() */
1004 		break;
1005 	case REQ_OP_FLUSH:
1006 		nvme_setup_flush(ns, cmd);
1007 		break;
1008 	case REQ_OP_ZONE_RESET_ALL:
1009 	case REQ_OP_ZONE_RESET:
1010 		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
1011 		break;
1012 	case REQ_OP_ZONE_OPEN:
1013 		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
1014 		break;
1015 	case REQ_OP_ZONE_CLOSE:
1016 		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
1017 		break;
1018 	case REQ_OP_ZONE_FINISH:
1019 		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
1020 		break;
1021 	case REQ_OP_WRITE_ZEROES:
1022 		ret = nvme_setup_write_zeroes(ns, req, cmd);
1023 		break;
1024 	case REQ_OP_DISCARD:
1025 		ret = nvme_setup_discard(ns, req, cmd);
1026 		break;
1027 	case REQ_OP_READ:
1028 		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
1029 		break;
1030 	case REQ_OP_WRITE:
1031 		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
1032 		break;
1033 	case REQ_OP_ZONE_APPEND:
1034 		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
1035 		break;
1036 	default:
1037 		WARN_ON_ONCE(1);
1038 		return BLK_STS_IOERR;
1039 	}
1040 
1041 	cmd->common.command_id = nvme_cid(req);
1042 	trace_nvme_setup_cmd(req, cmd);
1043 	return ret;
1044 }
1045 EXPORT_SYMBOL_GPL(nvme_setup_cmd);
1046 
1047 /*
1048  * Return values:
1049  * 0:  success
1050  * >0: nvme controller's cqe status response
1051  * <0: kernel error in lieu of controller response
1052  */
1053 static int nvme_execute_rq(struct gendisk *disk, struct request *rq,
1054 		bool at_head)
1055 {
1056 	blk_status_t status;
1057 
1058 	status = blk_execute_rq(rq, at_head);
1059 	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
1060 		return -EINTR;
1061 	if (nvme_req(rq)->status)
1062 		return nvme_req(rq)->status;
1063 	return blk_status_to_errno(status);
1064 }
1065 
1066 /*
1067  * Returns 0 on success.  If the result is negative, it's a Linux error code;
1068  * if the result is positive, it's an NVM Express status code.
1069  */
1070 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
1071 		union nvme_result *result, void *buffer, unsigned bufflen,
1072 		unsigned timeout, int qid, int at_head,
1073 		blk_mq_req_flags_t flags)
1074 {
1075 	struct request *req;
1076 	int ret;
1077 
1078 	if (qid == NVME_QID_ANY)
1079 		req = nvme_alloc_request(q, cmd, flags);
1080 	else
1081 		req = nvme_alloc_request_qid(q, cmd, flags, qid);
1082 	if (IS_ERR(req))
1083 		return PTR_ERR(req);
1084 
1085 	if (timeout)
1086 		req->timeout = timeout;
1087 
1088 	if (buffer && bufflen) {
1089 		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
1090 		if (ret)
1091 			goto out;
1092 	}
1093 
1094 	ret = nvme_execute_rq(NULL, req, at_head);
1095 	if (result && ret >= 0)
1096 		*result = nvme_req(req)->result;
1097  out:
1098 	blk_mq_free_request(req);
1099 	return ret;
1100 }
1101 EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
1102 
1103 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
1104 		void *buffer, unsigned bufflen)
1105 {
1106 	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
1107 			NVME_QID_ANY, 0, 0);
1108 }
1109 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
1110 
1111 static u32 nvme_known_admin_effects(u8 opcode)
1112 {
1113 	switch (opcode) {
1114 	case nvme_admin_format_nvm:
1115 		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_NCC |
1116 			NVME_CMD_EFFECTS_CSE_MASK;
1117 	case nvme_admin_sanitize_nvm:
1118 		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK;
1119 	default:
1120 		break;
1121 	}
1122 	return 0;
1123 }
1124 
1125 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
1126 {
1127 	u32 effects = 0;
1128 
1129 	if (ns) {
1130 		if (ns->head->effects)
1131 			effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
1132 		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
1133 			dev_warn_once(ctrl->device,
1134 				"IO command:%02x has unhandled effects:%08x\n",
1135 				opcode, effects);
1136 		return 0;
1137 	}
1138 
1139 	if (ctrl->effects)
1140 		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
1141 	effects |= nvme_known_admin_effects(opcode);
1142 
1143 	return effects;
1144 }
1145 EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);
1146 
1147 static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1148 			       u8 opcode)
1149 {
1150 	u32 effects = nvme_command_effects(ctrl, ns, opcode);
1151 
1152 	/*
1153 	 * For simplicity, IO to all namespaces is quiesced even if the command
1154 	 * effects say only one namespace is affected.
1155 	 */
1156 	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
1157 		mutex_lock(&ctrl->scan_lock);
1158 		mutex_lock(&ctrl->subsys->lock);
1159 		nvme_mpath_start_freeze(ctrl->subsys);
1160 		nvme_mpath_wait_freeze(ctrl->subsys);
1161 		nvme_start_freeze(ctrl);
1162 		nvme_wait_freeze(ctrl);
1163 	}
1164 	return effects;
1165 }
1166 
1167 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
1168 			      struct nvme_command *cmd, int status)
1169 {
1170 	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
1171 		nvme_unfreeze(ctrl);
1172 		nvme_mpath_unfreeze(ctrl->subsys);
1173 		mutex_unlock(&ctrl->subsys->lock);
1174 		nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
1175 		mutex_unlock(&ctrl->scan_lock);
1176 	}
1177 	if (effects & NVME_CMD_EFFECTS_CCC)
1178 		nvme_init_ctrl_finish(ctrl);
1179 	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
1180 		nvme_queue_scan(ctrl);
1181 		flush_work(&ctrl->scan_work);
1182 	}
1183 
1184 	switch (cmd->common.opcode) {
1185 	case nvme_admin_set_features:
1186 		switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) {
1187 		case NVME_FEAT_KATO:
1188 			/*
1189 			 * The keep-alive command interval on the host should
1190 			 * be updated when KATO is modified by a Set Features
1191 			 * command.
1192 			 */
1193 			if (!status)
1194 				nvme_update_keep_alive(ctrl, cmd);
1195 			break;
1196 		default:
1197 			break;
1198 		}
1199 		break;
1200 	default:
1201 		break;
1202 	}
1203 }
1204 
1205 int nvme_execute_passthru_rq(struct request *rq)
1206 {
1207 	struct nvme_command *cmd = nvme_req(rq)->cmd;
1208 	struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
1209 	struct nvme_ns *ns = rq->q->queuedata;
1210 	struct gendisk *disk = ns ? ns->disk : NULL;
1211 	u32 effects;
1212 	int  ret;
1213 
1214 	effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
1215 	ret = nvme_execute_rq(disk, rq, false);
1216 	if (effects) /* nothing to be done for zero cmd effects */
1217 		nvme_passthru_end(ctrl, effects, cmd, ret);
1218 
1219 	return ret;
1220 }
1221 EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
1222 
1223 /*
1224  * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
1225  *
1226  *   The host should send Keep Alive commands at half of the Keep Alive Timeout
1227  *   accounting for transport roundtrip times [..].
1228  */
1229 static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
1230 {
1231 	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
1232 }
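/*
 * Example: with a KATO of 120 seconds, the keep-alive work runs every
 * 60 seconds, leaving headroom for transport round-trip time per the
 * recommendation quoted above.
 */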
1233 
1234 static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
1235 {
1236 	struct nvme_ctrl *ctrl = rq->end_io_data;
1237 	unsigned long flags;
1238 	bool startka = false;
1239 
1240 	blk_mq_free_request(rq);
1241 
1242 	if (status) {
1243 		dev_err(ctrl->device,
1244 			"failed nvme_keep_alive_end_io error=%d\n",
1245 				status);
1246 		return;
1247 	}
1248 
1249 	ctrl->comp_seen = false;
1250 	spin_lock_irqsave(&ctrl->lock, flags);
1251 	if (ctrl->state == NVME_CTRL_LIVE ||
1252 	    ctrl->state == NVME_CTRL_CONNECTING)
1253 		startka = true;
1254 	spin_unlock_irqrestore(&ctrl->lock, flags);
1255 	if (startka)
1256 		nvme_queue_keep_alive_work(ctrl);
1257 }
1258 
1259 static void nvme_keep_alive_work(struct work_struct *work)
1260 {
1261 	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
1262 			struct nvme_ctrl, ka_work);
1263 	bool comp_seen = ctrl->comp_seen;
1264 	struct request *rq;
1265 
1266 	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
1267 		dev_dbg(ctrl->device,
1268 			"reschedule traffic based keep-alive timer\n");
1269 		ctrl->comp_seen = false;
1270 		nvme_queue_keep_alive_work(ctrl);
1271 		return;
1272 	}
1273 
1274 	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
1275 				BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
1276 	if (IS_ERR(rq)) {
1277 		/* allocation failure, reset the controller */
1278 		dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
1279 		nvme_reset_ctrl(ctrl);
1280 		return;
1281 	}
1282 
1283 	rq->timeout = ctrl->kato * HZ;
1284 	rq->end_io_data = ctrl;
1285 	blk_execute_rq_nowait(rq, false, nvme_keep_alive_end_io);
1286 }
1287 
1288 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
1289 {
1290 	if (unlikely(ctrl->kato == 0))
1291 		return;
1292 
1293 	nvme_queue_keep_alive_work(ctrl);
1294 }
1295 
1296 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
1297 {
1298 	if (unlikely(ctrl->kato == 0))
1299 		return;
1300 
1301 	cancel_delayed_work_sync(&ctrl->ka_work);
1302 }
1303 EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
1304 
1305 static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
1306 				   struct nvme_command *cmd)
1307 {
1308 	unsigned int new_kato =
1309 		DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000);
1310 
1311 	dev_info(ctrl->device,
1312 		 "keep alive interval updated from %u ms to %u ms\n",
1313 		 ctrl->kato * 1000 / 2, new_kato * 1000 / 2);
1314 
1315 	nvme_stop_keep_alive(ctrl);
1316 	ctrl->kato = new_kato;
1317 	nvme_start_keep_alive(ctrl);
1318 }
1319 
1320 /*
1321  * In NVMe 1.0 the CNS field was just a binary controller or namespace
1322  * flag, so sending any newer CNS value has a good chance of not working.
1323  * Qemu unfortunately had that bug while reporting 1.1 version compliance
1324  * (but not for any later version).
1325  */
1326 static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
1327 {
1328 	if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
1329 		return ctrl->vs < NVME_VS(1, 2, 0);
1330 	return ctrl->vs < NVME_VS(1, 1, 0);
1331 }
1332 
1333 static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
1334 {
1335 	struct nvme_command c = { };
1336 	int error;
1337 
1338 	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1339 	c.identify.opcode = nvme_admin_identify;
1340 	c.identify.cns = NVME_ID_CNS_CTRL;
1341 
1342 	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
1343 	if (!*id)
1344 		return -ENOMEM;
1345 
1346 	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
1347 			sizeof(struct nvme_id_ctrl));
1348 	if (error)
1349 		kfree(*id);
1350 	return error;
1351 }
1352 
1353 static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
1354 		struct nvme_ns_id_desc *cur, bool *csi_seen)
1355 {
1356 	const char *warn_str = "ctrl returned bogus length:";
1357 	void *data = cur;
1358 
1359 	switch (cur->nidt) {
1360 	case NVME_NIDT_EUI64:
1361 		if (cur->nidl != NVME_NIDT_EUI64_LEN) {
1362 			dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
1363 				 warn_str, cur->nidl);
1364 			return -1;
1365 		}
1366 		memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
1367 		return NVME_NIDT_EUI64_LEN;
1368 	case NVME_NIDT_NGUID:
1369 		if (cur->nidl != NVME_NIDT_NGUID_LEN) {
1370 			dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
1371 				 warn_str, cur->nidl);
1372 			return -1;
1373 		}
1374 		memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
1375 		return NVME_NIDT_NGUID_LEN;
1376 	case NVME_NIDT_UUID:
1377 		if (cur->nidl != NVME_NIDT_UUID_LEN) {
1378 			dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
1379 				 warn_str, cur->nidl);
1380 			return -1;
1381 		}
1382 		uuid_copy(&ids->uuid, data + sizeof(*cur));
1383 		return NVME_NIDT_UUID_LEN;
1384 	case NVME_NIDT_CSI:
1385 		if (cur->nidl != NVME_NIDT_CSI_LEN) {
1386 			dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
1387 				 warn_str, cur->nidl);
1388 			return -1;
1389 		}
1390 		memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
1391 		*csi_seen = true;
1392 		return NVME_NIDT_CSI_LEN;
1393 	default:
1394 		/* Skip unknown types */
1395 		return cur->nidl;
1396 	}
1397 }
1398 
1399 static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
1400 		struct nvme_ns_ids *ids)
1401 {
1402 	struct nvme_command c = { };
1403 	bool csi_seen = false;
1404 	int status, pos, len;
1405 	void *data;
1406 
1407 	if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
1408 		return 0;
1409 	if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
1410 		return 0;
1411 
1412 	c.identify.opcode = nvme_admin_identify;
1413 	c.identify.nsid = cpu_to_le32(nsid);
1414 	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
1415 
1416 	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
1417 	if (!data)
1418 		return -ENOMEM;
1419 
1420 	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
1421 				      NVME_IDENTIFY_DATA_SIZE);
1422 	if (status) {
1423 		dev_warn(ctrl->device,
1424 			"Identify Descriptors failed (nsid=%u, status=0x%x)\n",
1425 			nsid, status);
1426 		goto free_data;
1427 	}
1428 
1429 	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
1430 		struct nvme_ns_id_desc *cur = data + pos;
1431 
1432 		if (cur->nidl == 0)
1433 			break;
1434 
1435 		len = nvme_process_ns_desc(ctrl, ids, cur, &csi_seen);
1436 		if (len < 0)
1437 			break;
1438 
1439 		len += sizeof(*cur);
1440 	}
1441 
1442 	if (nvme_multi_css(ctrl) && !csi_seen) {
1443 		dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
1444 			 nsid);
1445 		status = -EINVAL;
1446 	}
1447 
1448 free_data:
1449 	kfree(data);
1450 	return status;
1451 }
1452 
1453 static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
1454 			struct nvme_ns_ids *ids, struct nvme_id_ns **id)
1455 {
1456 	struct nvme_command c = { };
1457 	int error;
1458 
1459 	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1460 	c.identify.opcode = nvme_admin_identify;
1461 	c.identify.nsid = cpu_to_le32(nsid);
1462 	c.identify.cns = NVME_ID_CNS_NS;
1463 
1464 	*id = kmalloc(sizeof(**id), GFP_KERNEL);
1465 	if (!*id)
1466 		return -ENOMEM;
1467 
1468 	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
1469 	if (error) {
1470 		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
1471 		goto out_free_id;
1472 	}
1473 
1474 	error = NVME_SC_INVALID_NS | NVME_SC_DNR;
1475 	if ((*id)->ncap == 0) /* namespace not allocated or attached */
1476 		goto out_free_id;
1477 
1478 	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
1479 	    !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
1480 		memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
1481 	if (ctrl->vs >= NVME_VS(1, 2, 0) &&
1482 	    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
1483 		memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
1484 
1485 	return 0;
1486 
1487 out_free_id:
1488 	kfree(*id);
1489 	return error;
1490 }
1491 
1492 static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
1493 		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
1494 {
1495 	union nvme_result res = { 0 };
1496 	struct nvme_command c = { };
1497 	int ret;
1498 
1499 	c.features.opcode = op;
1500 	c.features.fid = cpu_to_le32(fid);
1501 	c.features.dword11 = cpu_to_le32(dword11);
1502 
1503 	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
1504 			buffer, buflen, 0, NVME_QID_ANY, 0, 0);
1505 	if (ret >= 0 && result)
1506 		*result = le32_to_cpu(res.u32);
1507 	return ret;
1508 }
1509 
1510 int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
1511 		      unsigned int dword11, void *buffer, size_t buflen,
1512 		      u32 *result)
1513 {
1514 	return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
1515 			     buflen, result);
1516 }
1517 EXPORT_SYMBOL_GPL(nvme_set_features);
1518 
1519 int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
1520 		      unsigned int dword11, void *buffer, size_t buflen,
1521 		      u32 *result)
1522 {
1523 	return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
1524 			     buflen, result);
1525 }
1526 EXPORT_SYMBOL_GPL(nvme_get_features);
1527 
1528 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
1529 {
1530 	u32 q_count = (*count - 1) | ((*count - 1) << 16);
1531 	u32 result;
1532 	int status, nr_io_queues;
1533 
1534 	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
1535 			&result);
1536 	if (status < 0)
1537 		return status;
1538 
1539 	/*
1540 	 * Degraded controllers might return an error when setting the queue
1541 	 * count.  We still want to be able to bring them online and offer
1542 	 * access to the admin queue, as that might be the only way to fix them up.
1543 	 */
1544 	if (status > 0) {
1545 		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
1546 		*count = 0;
1547 	} else {
1548 		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
1549 		*count = min(*count, nr_io_queues);
1550 	}
1551 
1552 	return 0;
1553 }
1554 EXPORT_SYMBOL_GPL(nvme_set_queue_count);
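/*
 * Worked example: a caller requesting 4 I/O queues sends q_count ==
 * 0x00030003 (0's based NSQR/NCQR in the low and high 16 bits).  If
 * the controller returns result == 0x00070003 (4 submission, 8
 * completion queues allocated), nr_io_queues == min(3, 7) + 1 == 4 and
 * *count stays 4.
 */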
1555 
1556 #define NVME_AEN_SUPPORTED \
1557 	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
1558 	 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)
1559 
1560 static void nvme_enable_aen(struct nvme_ctrl *ctrl)
1561 {
1562 	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
1563 	int status;
1564 
1565 	if (!supported_aens)
1566 		return;
1567 
1568 	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
1569 			NULL, 0, &result);
1570 	if (status)
1571 		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
1572 			 supported_aens);
1573 
1574 	queue_work(nvme_wq, &ctrl->async_event_work);
1575 }
1576 
1577 static int nvme_ns_open(struct nvme_ns *ns)
1578 {
1580 	/* should never be called due to GENHD_FL_HIDDEN */
1581 	if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head)))
1582 		goto fail;
1583 	if (!nvme_get_ns(ns))
1584 		goto fail;
1585 	if (!try_module_get(ns->ctrl->ops->module))
1586 		goto fail_put_ns;
1587 
1588 	return 0;
1589 
1590 fail_put_ns:
1591 	nvme_put_ns(ns);
1592 fail:
1593 	return -ENXIO;
1594 }
1595 
1596 static void nvme_ns_release(struct nvme_ns *ns)
1597 {
1599 	module_put(ns->ctrl->ops->module);
1600 	nvme_put_ns(ns);
1601 }
1602 
1603 static int nvme_open(struct block_device *bdev, fmode_t mode)
1604 {
1605 	return nvme_ns_open(bdev->bd_disk->private_data);
1606 }
1607 
1608 static void nvme_release(struct gendisk *disk, fmode_t mode)
1609 {
1610 	nvme_ns_release(disk->private_data);
1611 }
1612 
1613 int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1614 {
1615 	/* some standard values */
1616 	geo->heads = 1 << 6;
1617 	geo->sectors = 1 << 5;
1618 	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
1619 	return 0;
1620 }
1621 
1622 #ifdef CONFIG_BLK_DEV_INTEGRITY
1623 static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
1624 				u32 max_integrity_segments)
1625 {
1626 	struct blk_integrity integrity = { };
1627 
1628 	switch (pi_type) {
1629 	case NVME_NS_DPS_PI_TYPE3:
1630 		integrity.profile = &t10_pi_type3_crc;
1631 		integrity.tag_size = sizeof(u16) + sizeof(u32);
1632 		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1633 		break;
1634 	case NVME_NS_DPS_PI_TYPE1:
1635 	case NVME_NS_DPS_PI_TYPE2:
1636 		integrity.profile = &t10_pi_type1_crc;
1637 		integrity.tag_size = sizeof(u16);
1638 		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1639 		break;
1640 	default:
1641 		integrity.profile = NULL;
1642 		break;
1643 	}
1644 	integrity.tuple_size = ms;
1645 	blk_integrity_register(disk, &integrity);
1646 	blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
1647 }
1648 #else
1649 static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
1650 				u32 max_integrity_segments)
1651 {
1652 }
1653 #endif /* CONFIG_BLK_DEV_INTEGRITY */
1654 
1655 static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
1656 {
1657 	struct nvme_ctrl *ctrl = ns->ctrl;
1658 	struct request_queue *queue = disk->queue;
1659 	u32 size = queue_logical_block_size(queue);
1660 
1661 	if (ctrl->max_discard_sectors == 0) {
1662 		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
1663 		return;
1664 	}
1665 
1666 	if (ctrl->nr_streams && ns->sws && ns->sgs)
1667 		size *= ns->sws * ns->sgs;
1668 
1669 	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
1670 			NVME_DSM_MAX_RANGES);
1671 
1672 	queue->limits.discard_alignment = 0;
1673 	queue->limits.discard_granularity = size;
1674 
1675 	/* If discard is already enabled, don't reset queue limits */
1676 	if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
1677 		return;
1678 
1679 	blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
1680 	blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
1681 
1682 	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
1683 		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
1684 }
1685 
1686 static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
1687 {
1688 	return !uuid_is_null(&ids->uuid) ||
1689 		memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
1690 		memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
1691 }
1692 
1693 static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
1694 {
1695 	return uuid_equal(&a->uuid, &b->uuid) &&
1696 		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
1697 		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
1698 		a->csi == b->csi;
1699 }
1700 
1701 static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1702 				 u32 *phys_bs, u32 *io_opt)
1703 {
1704 	struct streams_directive_params s;
1705 	int ret;
1706 
1707 	if (!ctrl->nr_streams)
1708 		return 0;
1709 
1710 	ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
1711 	if (ret)
1712 		return ret;
1713 
1714 	ns->sws = le32_to_cpu(s.sws);
1715 	ns->sgs = le16_to_cpu(s.sgs);
1716 
1717 	if (ns->sws) {
1718 		*phys_bs = ns->sws * (1 << ns->lba_shift);
1719 		if (ns->sgs)
1720 			*io_opt = *phys_bs * ns->sgs;
1721 	}
1722 
1723 	return 0;
1724 }
1725 
1726 static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
1727 {
1728 	struct nvme_ctrl *ctrl = ns->ctrl;
1729 
1730 	/*
1731 	 * The PI implementation requires the metadata size to be equal to the
1732 	 * t10 pi tuple size.
1733 	 */
1734 	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
1735 	if (ns->ms == sizeof(struct t10_pi_tuple))
1736 		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
1737 	else
1738 		ns->pi_type = 0;
1739 
1740 	ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
1741 	if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
1742 		return 0;
1743 	if (ctrl->ops->flags & NVME_F_FABRICS) {
1744 		/*
1745 		 * The NVMe over Fabrics specification only supports metadata as
1746 		 * part of the extended data LBA.  We rely on HCA/HBA support to
1747 		 * remap the separate metadata buffer from the block layer.
1748 		 */
1749 		if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
1750 			return -EINVAL;
1751 
1752 		ns->features |= NVME_NS_EXT_LBAS;
1753 
1754 		/*
1755 		 * The current fabrics transport drivers support namespace
1756 		 * metadata formats only if nvme_ns_has_pi() returns true.
1757 		 * Suppress support for all other formats so the namespace will
1758 		 * have a 0 capacity and not be usable through the block stack.
1759 		 *
1760 		 * Note, this check will need to be modified if any drivers
1761 		 * gain the ability to use other metadata formats.
1762 		 */
1763 		if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
1764 			ns->features |= NVME_NS_METADATA_SUPPORTED;
1765 	} else {
1766 		/*
1767 		 * For PCIe controllers, we can't easily remap the separate
1768 		 * metadata buffer from the block layer and thus require a
1769 		 * separate metadata buffer for block layer metadata/PI support.
1770 		 * We allow extended LBAs for the passthrough interface, though.
1771 		 */
1772 		if (id->flbas & NVME_NS_FLBAS_META_EXT)
1773 			ns->features |= NVME_NS_EXT_LBAS;
1774 		else
1775 			ns->features |= NVME_NS_METADATA_SUPPORTED;
1776 	}
1777 
1778 	return 0;
1779 }
1780 
1781 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
1782 		struct request_queue *q)
1783 {
1784 	bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT;
1785 
1786 	if (ctrl->max_hw_sectors) {
1787 		u32 max_segments =
1788 			(ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;
1789 
1790 		max_segments = min_not_zero(max_segments, ctrl->max_segments);
1791 		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
1792 		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
1793 	}
1794 	blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
1795 	blk_queue_dma_alignment(q, 7);
1796 	blk_queue_write_cache(q, vwc, vwc);
1797 }
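/*
 * Example: with NVME_CTRL_PAGE_SIZE == 4096, a controller reporting
 * max_hw_sectors == 256 (128 KiB) gets 256 / 8 + 1 == 33 max segments;
 * the extra segment presumably covers an initial page that is not
 * fully aligned.
 */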
1798 
1799 static void nvme_update_disk_info(struct gendisk *disk,
1800 		struct nvme_ns *ns, struct nvme_id_ns *id)
1801 {
1802 	sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
1803 	unsigned short bs = 1 << ns->lba_shift;
1804 	u32 atomic_bs, phys_bs, io_opt = 0;
1805 
1806 	/*
1807 	 * The block layer can't support LBA sizes larger than the page size
1808 	 * yet, so catch this early and don't allow block I/O.
1809 	 */
1810 	if (ns->lba_shift > PAGE_SHIFT) {
1811 		capacity = 0;
1812 		bs = (1 << 9);
1813 	}
1814 
1815 	blk_integrity_unregister(disk);
1816 
1817 	atomic_bs = phys_bs = bs;
1818 	nvme_setup_streams_ns(ns->ctrl, ns, &phys_bs, &io_opt);
1819 	if (id->nabo == 0) {
1820 		/*
1821 		 * Bit 1 indicates whether NAWUPF is defined for this namespace
1822 		 * and whether it should be used instead of AWUPF. If NAWUPF ==
1823 		 * 0 then AWUPF must be used instead.
1824 		 */
1825 		if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
1826 			atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
1827 		else
1828 			atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
1829 	}
1830 
1831 	if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
1832 		/* NPWG = Namespace Preferred Write Granularity */
1833 		phys_bs = bs * (1 + le16_to_cpu(id->npwg));
1834 		/* NOWS = Namespace Optimal Write Size */
1835 		io_opt = bs * (1 + le16_to_cpu(id->nows));
1836 	}
1837 
1838 	blk_queue_logical_block_size(disk->queue, bs);
1839 	/*
1840 	 * Linux filesystems assume writing a single physical block is
1841 	 * an atomic operation. Hence limit the physical block size to the
1842 	 * value of the Atomic Write Unit Power Fail parameter.
1843 	 */
1844 	blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
1845 	blk_queue_io_min(disk->queue, phys_bs);
1846 	blk_queue_io_opt(disk->queue, io_opt);
1847 
1848 	/*
1849 	 * Register a metadata profile for PI, or the plain non-integrity NVMe
1850 	 * Register a metadata profile for PI, or the plain non-integrity NVMe
1851 	 * metadata masquerading as Type 0 if supported; otherwise reject block
1852 	 * I/O to namespaces with metadata, except when the namespace supports
1853 	 * PI, as the controller can strip/insert it in that case.
1854 	if (ns->ms) {
1855 		if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
1856 		    (ns->features & NVME_NS_METADATA_SUPPORTED))
1857 			nvme_init_integrity(disk, ns->ms, ns->pi_type,
1858 					    ns->ctrl->max_integrity_segments);
1859 		else if (!nvme_ns_has_pi(ns))
1860 			capacity = 0;
1861 	}
1862 
1863 	set_capacity_and_notify(disk, capacity);
1864 
1865 	nvme_config_discard(disk, ns);
1866 	blk_queue_max_write_zeroes_sectors(disk->queue,
1867 					   ns->ctrl->max_zeroes_sectors);
1868 
1869 	set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) ||
1870 		test_bit(NVME_NS_FORCE_RO, &ns->flags));
1871 }
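
/*
 * Worked example (illustrative): NAWUPF is zero's based, so a namespace
 * with 4 KiB LBAs reporting NAWUPF = 7 guarantees power-fail atomicity
 * for writes up to (1 + 7) * 4096 = 32 KiB, and the physical block size
 * exposed to filesystems is capped at that value.
 */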
1872 
1873 static inline bool nvme_first_scan(struct gendisk *disk)
1874 {
1875 	/* nvme_alloc_ns() scans the disk prior to adding it */
1876 	return !disk_live(disk);
1877 }
1878 
1879 static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
1880 {
1881 	struct nvme_ctrl *ctrl = ns->ctrl;
1882 	u32 iob;
1883 
1884 	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
1885 	    is_power_of_2(ctrl->max_hw_sectors))
1886 		iob = ctrl->max_hw_sectors;
1887 	else
1888 		iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));
1889 
1890 	if (!iob)
1891 		return;
1892 
1893 	if (!is_power_of_2(iob)) {
1894 		if (nvme_first_scan(ns->disk))
1895 			pr_warn("%s: ignoring unaligned IO boundary:%u\n",
1896 				ns->disk->disk_name, iob);
1897 		return;
1898 	}
1899 
1900 	if (blk_queue_is_zoned(ns->disk->queue)) {
1901 		if (nvme_first_scan(ns->disk))
1902 			pr_warn("%s: ignoring zoned namespace IO boundary\n",
1903 				ns->disk->disk_name);
1904 		return;
1905 	}
1906 
1907 	blk_queue_chunk_sectors(ns->queue, iob);
1908 }
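
/*
 * Worked example (illustrative): a namespace with 4 KiB LBAs
 * (lba_shift = 12) reporting NOIOB = 256 yields
 * iob = 256 << (12 - 9) = 2048 sectors, i.e. a 1 MiB boundary.  As 2048
 * is a power of two, the block layer will split bios so that no request
 * crosses a 1 MiB stripe boundary.
 */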
1909 
1910 static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
1911 {
1912 	unsigned lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
1913 	int ret;
1914 
1915 	blk_mq_freeze_queue(ns->disk->queue);
1916 	ns->lba_shift = id->lbaf[lbaf].ds;
1917 	nvme_set_queue_limits(ns->ctrl, ns->queue);
1918 
1919 	ret = nvme_configure_metadata(ns, id);
1920 	if (ret)
1921 		goto out_unfreeze;
1922 	nvme_set_chunk_sectors(ns, id);
1923 	nvme_update_disk_info(ns->disk, ns, id);
1924 
1925 	if (ns->head->ids.csi == NVME_CSI_ZNS) {
1926 		ret = nvme_update_zone_info(ns, lbaf);
1927 		if (ret)
1928 			goto out_unfreeze;
1929 	}
1930 
1931 	set_bit(NVME_NS_READY, &ns->flags);
1932 	blk_mq_unfreeze_queue(ns->disk->queue);
1933 
1934 	if (blk_queue_is_zoned(ns->queue)) {
1935 		ret = nvme_revalidate_zones(ns);
1936 		if (ret && !nvme_first_scan(ns->disk))
1937 			goto out;
1938 	}
1939 
1940 	if (nvme_ns_head_multipath(ns->head)) {
1941 		blk_mq_freeze_queue(ns->head->disk->queue);
1942 		nvme_update_disk_info(ns->head->disk, ns, id);
1943 		nvme_mpath_revalidate_paths(ns);
1944 		blk_stack_limits(&ns->head->disk->queue->limits,
1945 				 &ns->queue->limits, 0);
1946 		disk_update_readahead(ns->head->disk);
1947 		blk_mq_unfreeze_queue(ns->head->disk->queue);
1948 	}
1949 	return 0;
1950 
1951 out_unfreeze:
1952 	blk_mq_unfreeze_queue(ns->disk->queue);
1953 out:
1954 	/*
1955 	 * If probing fails due to an unsupported feature, hide the block device,
1956 	 * but still allow other access.
1957 	 */
1958 	if (ret == -ENODEV) {
1959 		ns->disk->flags |= GENHD_FL_HIDDEN;
1960 		ret = 0;
1961 	}
1962 	return ret;
1963 }
1964 
1965 static char nvme_pr_type(enum pr_type type)
1966 {
1967 	switch (type) {
1968 	case PR_WRITE_EXCLUSIVE:
1969 		return 1;
1970 	case PR_EXCLUSIVE_ACCESS:
1971 		return 2;
1972 	case PR_WRITE_EXCLUSIVE_REG_ONLY:
1973 		return 3;
1974 	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
1975 		return 4;
1976 	case PR_WRITE_EXCLUSIVE_ALL_REGS:
1977 		return 5;
1978 	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
1979 		return 6;
1980 	default:
1981 		return 0;
1982 	}
1983 }
1984 
1985 static int nvme_send_ns_head_pr_command(struct block_device *bdev,
1986 		struct nvme_command *c, u8 data[16])
1987 {
1988 	struct nvme_ns_head *head = bdev->bd_disk->private_data;
1989 	int srcu_idx = srcu_read_lock(&head->srcu);
1990 	struct nvme_ns *ns = nvme_find_path(head);
1991 	int ret = -EWOULDBLOCK;
1992 
1993 	if (ns) {
1994 		c->common.nsid = cpu_to_le32(ns->head->ns_id);
1995 		ret = nvme_submit_sync_cmd(ns->queue, c, data, 16);
1996 	}
1997 	srcu_read_unlock(&head->srcu, srcu_idx);
1998 	return ret;
1999 }
2000 
2001 static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
2002 		u8 data[16])
2003 {
2004 	c->common.nsid = cpu_to_le32(ns->head->ns_id);
2005 	return nvme_submit_sync_cmd(ns->queue, c, data, 16);
2006 }
2007 
2008 static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
2009 				u64 key, u64 sa_key, u8 op)
2010 {
2011 	struct nvme_command c = { };
2012 	u8 data[16] = { 0, };
2013 
2014 	put_unaligned_le64(key, &data[0]);
2015 	put_unaligned_le64(sa_key, &data[8]);
2016 
2017 	c.common.opcode = op;
2018 	c.common.cdw10 = cpu_to_le32(cdw10);
2019 
2020 	if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
2021 	    bdev->bd_disk->fops == &nvme_ns_head_ops)
2022 		return nvme_send_ns_head_pr_command(bdev, &c, data);
2023 	return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c, data);
2024 }
2025 
2026 static int nvme_pr_register(struct block_device *bdev, u64 old,
2027 		u64 new, unsigned flags)
2028 {
2029 	u32 cdw10;
2030 
2031 	if (flags & ~PR_FL_IGNORE_KEY)
2032 		return -EOPNOTSUPP;
2033 
2034 	cdw10 = old ? 2 : 0;
2035 	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
2036 	cdw10 |= (1 << 30) | (1 << 31); /* CPTPL = 11b: set PTPL state to '1' */
2037 	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
2038 }
2039 
2040 static int nvme_pr_reserve(struct block_device *bdev, u64 key,
2041 		enum pr_type type, unsigned flags)
2042 {
2043 	u32 cdw10;
2044 
2045 	if (flags & ~PR_FL_IGNORE_KEY)
2046 		return -EOPNOTSUPP;
2047 
2048 	cdw10 = nvme_pr_type(type) << 8;
2049 	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
2050 	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
2051 }
2052 
2053 static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
2054 		enum pr_type type, bool abort)
2055 {
2056 	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
2057 
2058 	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
2059 }
2060 
2061 static int nvme_pr_clear(struct block_device *bdev, u64 key)
2062 {
2063 	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
2064 
	/* Clear is an action of Reservation Release (RRELA = 001b) */
2065 	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
2066 }
2067 
2068 static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
2069 {
2070 	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
2071 
2072 	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
2073 }
2074 
2075 const struct pr_ops nvme_pr_ops = {
2076 	.pr_register	= nvme_pr_register,
2077 	.pr_reserve	= nvme_pr_reserve,
2078 	.pr_release	= nvme_pr_release,
2079 	.pr_preempt	= nvme_pr_preempt,
2080 	.pr_clear	= nvme_pr_clear,
2081 };
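
/*
 * Usage sketch (illustrative; the file descriptor and keys are
 * hypothetical): these ops back the generic persistent reservation
 * ioctls from <linux/pr.h>, e.g. from user space:
 *
 *	struct pr_registration reg = { .old_key = 0, .new_key = 0xabcd };
 *	ioctl(fd, IOC_PR_REGISTER, &reg);
 *
 * which reaches nvme_pr_register() above and is translated into a
 * Reservation Register command with RREGA = 000b (Register Key, since
 * old == 0) and CPTPL = 11b.
 */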
2082 
2083 #ifdef CONFIG_BLK_SED_OPAL
2084 int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
2085 		bool send)
2086 {
2087 	struct nvme_ctrl *ctrl = data;
2088 	struct nvme_command cmd = { };
2089 
2090 	if (send)
2091 		cmd.common.opcode = nvme_admin_security_send;
2092 	else
2093 		cmd.common.opcode = nvme_admin_security_recv;
2094 	cmd.common.nsid = 0;
2095 	cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
2096 	cmd.common.cdw11 = cpu_to_le32(len);
2097 
2098 	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 0,
2099 			NVME_QID_ANY, 1, 0);
2100 }
2101 EXPORT_SYMBOL_GPL(nvme_sec_submit);
2102 #endif /* CONFIG_BLK_SED_OPAL */
2103 
2104 #ifdef CONFIG_BLK_DEV_ZONED
2105 static int nvme_report_zones(struct gendisk *disk, sector_t sector,
2106 		unsigned int nr_zones, report_zones_cb cb, void *data)
2107 {
2108 	return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb,
2109 			data);
2110 }
2111 #else
2112 #define nvme_report_zones	NULL
2113 #endif /* CONFIG_BLK_DEV_ZONED */
2114 
2115 static const struct block_device_operations nvme_bdev_ops = {
2116 	.owner		= THIS_MODULE,
2117 	.ioctl		= nvme_ioctl,
2118 	.open		= nvme_open,
2119 	.release	= nvme_release,
2120 	.getgeo		= nvme_getgeo,
2121 	.report_zones	= nvme_report_zones,
2122 	.pr_ops		= &nvme_pr_ops,
2123 };
2124 
2125 static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
2126 {
2127 	unsigned long timeout =
2128 		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
2129 	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
2130 	int ret;
2131 
2132 	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
2133 		if (csts == ~0)
2134 			return -ENODEV;
2135 		if ((csts & NVME_CSTS_RDY) == bit)
2136 			break;
2137 
2138 		usleep_range(1000, 2000);
2139 		if (fatal_signal_pending(current))
2140 			return -EINTR;
2141 		if (time_after(jiffies, timeout)) {
2142 			dev_err(ctrl->device,
2143 				"Device not ready; aborting %s, CSTS=0x%x\n",
2144 				enabled ? "initialisation" : "reset", csts);
2145 			return -ENODEV;
2146 		}
2147 	}
2148 
2149 	return ret;
2150 }
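
/*
 * Worked example (illustrative): CAP.TO is expressed in 500 ms units,
 * so a controller reporting CAP.TO = 30 gives nvme_wait_ready() a
 * budget of (30 + 1) * 500 ms = 15.5 seconds before the device is
 * declared dead.
 */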
2151 
2152 /*
2153  * If the device has been passed off to us in an enabled state, just clear
2154  * the enabled bit.  The spec says we should set the 'shutdown notification
2155  * bits', but doing so may cause the device to complete commands to the
2156  * admin queue ... and we don't know what memory that might be pointing at!
2157  */
2158 int nvme_disable_ctrl(struct nvme_ctrl *ctrl)
2159 {
2160 	int ret;
2161 
2162 	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
2163 	ctrl->ctrl_config &= ~NVME_CC_ENABLE;
2164 
2165 	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2166 	if (ret)
2167 		return ret;
2168 
2169 	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
2170 		msleep(NVME_QUIRK_DELAY_AMOUNT);
2171 
2172 	return nvme_wait_ready(ctrl, ctrl->cap, false);
2173 }
2174 EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
2175 
2176 int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
2177 {
2178 	unsigned dev_page_min;
2179 	int ret;
2180 
2181 	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
2182 	if (ret) {
2183 		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
2184 		return ret;
2185 	}
2186 	dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;
2187 
2188 	if (NVME_CTRL_PAGE_SHIFT < dev_page_min) {
2189 		dev_err(ctrl->device,
2190 			"Minimum device page size %u too large for host (%u)\n",
2191 			1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT);
2192 		return -ENODEV;
2193 	}
2194 
2195 	if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
2196 		ctrl->ctrl_config = NVME_CC_CSS_CSI;
2197 	else
2198 		ctrl->ctrl_config = NVME_CC_CSS_NVM;
2199 	ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
2200 	ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
2201 	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
2202 	ctrl->ctrl_config |= NVME_CC_ENABLE;
2203 
2204 	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2205 	if (ret)
2206 		return ret;
2207 	return nvme_wait_ready(ctrl, ctrl->cap, true);
2208 }
2209 EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
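
/*
 * Illustrative example: for a plain NVM command set controller with a
 * 4 KiB host page (NVME_CTRL_PAGE_SHIFT == 12, so CC.MPS = 0),
 * round-robin arbitration and 64-byte/16-byte SQ/CQ entries, the value
 * written to CC by nvme_enable_ctrl() works out to:
 *
 *	NVME_CC_IOCQES (4 << 20) | NVME_CC_IOSQES (6 << 16) | NVME_CC_ENABLE
 *	= 0x00460001
 */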
2210 
2211 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
2212 {
2213 	unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
2214 	u32 csts;
2215 	int ret;
2216 
2217 	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
2218 	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
2219 
2220 	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2221 	if (ret)
2222 		return ret;
2223 
2224 	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
2225 		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
2226 			break;
2227 
2228 		msleep(100);
2229 		if (fatal_signal_pending(current))
2230 			return -EINTR;
2231 		if (time_after(jiffies, timeout)) {
2232 			dev_err(ctrl->device,
2233 				"Device shutdown incomplete; abort shutdown\n");
2234 			return -ENODEV;
2235 		}
2236 	}
2237 
2238 	return ret;
2239 }
2240 EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
2241 
2242 static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
2243 {
2244 	__le64 ts;
2245 	int ret;
2246 
2247 	if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
2248 		return 0;
2249 
2250 	ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
2251 	ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
2252 			NULL);
2253 	if (ret)
2254 		dev_warn_once(ctrl->device,
2255 			"could not set timestamp (%d)\n", ret);
2256 	return ret;
2257 }
2258 
2259 static int nvme_configure_acre(struct nvme_ctrl *ctrl)
2260 {
2261 	struct nvme_feat_host_behavior *host;
2262 	int ret;
2263 
2264 	/* Don't bother enabling the feature if retry delay is not reported */
2265 	if (!ctrl->crdt[0])
2266 		return 0;
2267 
2268 	host = kzalloc(sizeof(*host), GFP_KERNEL);
2269 	if (!host)
2270 		return 0;
2271 
2272 	host->acre = NVME_ENABLE_ACRE;
2273 	ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
2274 				host, sizeof(*host), NULL);
2275 	kfree(host);
2276 	return ret;
2277 }
2278 
2279 /*
2280  * This function checks whether the given total (exlat + enlat) latency of
2281  * a power state allows that state to be used as an APST transition target.
2282  * It does so by comparing the latency to the primary and secondary latency
2283  * tolerances defined by module params. If there's a match, the corresponding
2284  * timeout value is returned and the matching tolerance index (1 or 2) is
2285  * reported.
2286  */
2287 static bool nvme_apst_get_transition_time(u64 total_latency,
2288 		u64 *transition_time, unsigned *last_index)
2289 {
2290 	if (total_latency <= apst_primary_latency_tol_us) {
2291 		if (*last_index == 1)
2292 			return false;
2293 		*last_index = 1;
2294 		*transition_time = apst_primary_timeout_ms;
2295 		return true;
2296 	}
2297 	if (apst_secondary_timeout_ms &&
2298 		total_latency <= apst_secondary_latency_tol_us) {
2299 		if (*last_index <= 2)
2300 			return false;
2301 		*last_index = 2;
2302 		*transition_time = apst_secondary_timeout_ms;
2303 		return true;
2304 	}
2305 	return false;
2306 }
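
/*
 * Worked example (illustrative, using the module parameter defaults):
 * a non-operational state with enlat + exlat = 10,000 us fits the
 * 15,000 us primary tolerance and is given the 100 ms primary timeout;
 * a deeper state totalling 50,000 us only fits the 100,000 us secondary
 * tolerance and is given the 2,000 ms timeout.  Each tolerance band is
 * applied to at most one state, tracked through *last_index.
 */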
2307 
2308 /*
2309  * APST (Autonomous Power State Transition) lets us program a table of power
2310  * state transitions that the controller will perform automatically.
2311  *
2312  * Depending on module params, one of the two supported techniques will be used:
2313  *
2314  * - If the parameters provide explicit timeouts and tolerances, they will be
2315  *   used to build a table with up to 2 non-operational states to transition to.
2316  *   The default parameter values were selected based on the values used by
2317  *   Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic
2318  *   regeneration of the APST table in the event of switching between external
2319  *   and battery power, the timeouts and tolerances reflect a compromise
2320  *   between values used by Microsoft for AC and battery scenarios.
2321  * - If not, we'll configure the table with a simple heuristic: we are willing
2322  *   to spend at most 2% of the time transitioning between power states.
2323  *   Therefore, when running in any given state, we will enter the next
2324  *   lower-power non-operational state after waiting 50 * (enlat + exlat)
2325  *   microseconds, as long as that state's exit latency is under the requested
2326  *   maximum latency.
2327  *
2328  * We will not autonomously enter any non-operational state for which the total
2329  * latency exceeds ps_max_latency_us.
2330  *
2331  * Users can set ps_max_latency_us to zero to turn off APST.
2332  */
2333 static int nvme_configure_apst(struct nvme_ctrl *ctrl)
2334 {
2335 	struct nvme_feat_auto_pst *table;
2336 	unsigned apste = 0;
2337 	u64 max_lat_us = 0;
2338 	__le64 target = 0;
2339 	int max_ps = -1;
2340 	int state;
2341 	int ret;
2342 	unsigned last_lt_index = UINT_MAX;
2343 
2344 	/*
2345 	 * If APST isn't supported or if we haven't been initialized yet,
2346 	 * then don't do anything.
2347 	 */
2348 	if (!ctrl->apsta)
2349 		return 0;
2350 
2351 	if (ctrl->npss > 31) {
2352 		dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
2353 		return 0;
2354 	}
2355 
2356 	table = kzalloc(sizeof(*table), GFP_KERNEL);
2357 	if (!table)
2358 		return 0;
2359 
2360 	if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
2361 		/* Turn off APST. */
2362 		dev_dbg(ctrl->device, "APST disabled\n");
2363 		goto done;
2364 	}
2365 
2366 	/*
2367 	 * Walk through all states from lowest- to highest-power.
2368 	 * According to the spec, lower-numbered states use more power.  NPSS,
2369 	 * despite the name, is the index of the lowest-power state, not the
2370 	 * number of states.
2371 	 */
2372 	for (state = (int)ctrl->npss; state >= 0; state--) {
2373 		u64 total_latency_us, exit_latency_us, transition_ms;
2374 
2375 		if (target)
2376 			table->entries[state] = target;
2377 
2378 		/*
2379 		 * Don't allow transitions to the deepest state if it's quirked
2380 		 * off.
2381 		 */
2382 		if (state == ctrl->npss &&
2383 		    (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
2384 			continue;
2385 
2386 		/*
2387 		 * Is this state a useful non-operational state for higher-power
2388 		 * states to autonomously transition to?
2389 		 */
2390 		if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE))
2391 			continue;
2392 
2393 		exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
2394 		if (exit_latency_us > ctrl->ps_max_latency_us)
2395 			continue;
2396 
2397 		total_latency_us = exit_latency_us +
2398 			le32_to_cpu(ctrl->psd[state].entry_lat);
2399 
2400 		/*
2401 		 * This state is good. It can be used as the APST idle target
2402 		 * for higher power states.
2403 		 */
2404 		if (apst_primary_timeout_ms && apst_primary_latency_tol_us) {
2405 			if (!nvme_apst_get_transition_time(total_latency_us,
2406 					&transition_ms, &last_lt_index))
2407 				continue;
2408 		} else {
2409 			transition_ms = total_latency_us + 19;
2410 			do_div(transition_ms, 20);
2411 			if (transition_ms > (1 << 24) - 1)
2412 				transition_ms = (1 << 24) - 1;
2413 		}
2414 
2415 		target = cpu_to_le64((state << 3) | (transition_ms << 8));
2416 		if (max_ps == -1)
2417 			max_ps = state;
2418 		if (total_latency_us > max_lat_us)
2419 			max_lat_us = total_latency_us;
2420 	}
2421 
2422 	if (max_ps == -1)
2423 		dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
2424 	else
2425 		dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
2426 			max_ps, max_lat_us, (int)sizeof(*table), table);
2427 	apste = 1;
2428 
2429 done:
2430 	ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
2431 				table, sizeof(*table), NULL);
2432 	if (ret)
2433 		dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
2434 	kfree(table);
2435 	return ret;
2436 }
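
/*
 * Worked example (illustrative): each APST table entry packs the Idle
 * Transition Power State into bits 7:3 and the Idle Time Prior to
 * Transition, in milliseconds, into bits 31:8.  Targeting power state 4
 * after 100 ms of idleness therefore encodes as:
 *
 *	(4 << 3) | (100 << 8) = 0x6420
 */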
2437 
2438 static void nvme_set_latency_tolerance(struct device *dev, s32 val)
2439 {
2440 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2441 	u64 latency;
2442 
2443 	switch (val) {
2444 	case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
2445 	case PM_QOS_LATENCY_ANY:
2446 		latency = U64_MAX;
2447 		break;
2448 
2449 	default:
2450 		latency = val;
2451 	}
2452 
2453 	if (ctrl->ps_max_latency_us != latency) {
2454 		ctrl->ps_max_latency_us = latency;
2455 		if (ctrl->state == NVME_CTRL_LIVE)
2456 			nvme_configure_apst(ctrl);
2457 	}
2458 }
2459 
2460 struct nvme_core_quirk_entry {
2461 	/*
2462 	 * NVMe model and firmware strings are padded with spaces.  For
2463 	 * simplicity, strings in the quirk table are padded with NULLs
2464 	 * instead.
2465 	 */
2466 	u16 vid;
2467 	const char *mn;
2468 	const char *fr;
2469 	unsigned long quirks;
2470 };
2471 
2472 static const struct nvme_core_quirk_entry core_quirks[] = {
2473 	{
2474 		/*
2475 		 * This Toshiba device seems to die using any APST states.  See:
2476 		 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
2477 		 */
2478 		.vid = 0x1179,
2479 		.mn = "THNSF5256GPUK TOSHIBA",
2480 		.quirks = NVME_QUIRK_NO_APST,
2481 	},
2482 	{
2483 		/*
2484 		 * This LiteON CL1-3D*-Q11 firmware version has a race
2485 		 * condition associated with actions related to suspend to idle.
2486 		 * LiteON has resolved the problem in future firmware.
2487 		 */
2488 		.vid = 0x14a4,
2489 		.fr = "22301111",
2490 		.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
2491 	},
2492 	{
2493 		/*
2494 		 * This Kioxia CD6-V Series / HPE PE8030 device times out and
2495 		 * aborts I/O during any load, but the failure is more easily
2496 		 * reproducible with discards (fstrim).
2497 		 *
2498 		 * The device is left in a state where it is also not possible
2499 		 * to use "nvme set-feature" to disable APST, but booting with
2500 		 * nvme_core.default_ps_max_latency_us=0 works.
2501 		 */
2502 		.vid = 0x1e0f,
2503 		.mn = "KCD6XVUL6T40",
2504 		.quirks = NVME_QUIRK_NO_APST,
2505 	}
2506 };
2507 
2508 /* match is null-terminated but idstr is space-padded. */
2509 static bool string_matches(const char *idstr, const char *match, size_t len)
2510 {
2511 	size_t matchlen;
2512 
2513 	if (!match)
2514 		return true;
2515 
2516 	matchlen = strlen(match);
2517 	WARN_ON_ONCE(matchlen > len);
2518 
2519 	if (memcmp(idstr, match, matchlen))
2520 		return false;
2521 
2522 	for (; matchlen < len; matchlen++)
2523 		if (idstr[matchlen] != ' ')
2524 			return false;
2525 
2526 	return true;
2527 }
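
/*
 * Example (illustrative): string_matches("FW1.0   ", "FW1.0", 8) is true
 * because all trailing bytes are spaces, while
 * string_matches("FW1.0rc1", "FW1.0", 8) is false.
 */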
2528 
2529 static bool quirk_matches(const struct nvme_id_ctrl *id,
2530 			  const struct nvme_core_quirk_entry *q)
2531 {
2532 	return q->vid == le16_to_cpu(id->vid) &&
2533 		string_matches(id->mn, q->mn, sizeof(id->mn)) &&
2534 		string_matches(id->fr, q->fr, sizeof(id->fr));
2535 }
2536 
2537 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
2538 		struct nvme_id_ctrl *id)
2539 {
2540 	size_t nqnlen;
2541 	int off;
2542 
2543 	if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
2544 		nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
2545 		if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
2546 			strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
2547 			return;
2548 		}
2549 
2550 		if (ctrl->vs >= NVME_VS(1, 2, 1))
2551 			dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
2552 	}
2553 
2554 	/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
2555 	off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
2556 			"nqn.2014.08.org.nvmexpress:%04x%04x",
2557 			le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
2558 	memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
2559 	off += sizeof(id->sn);
2560 	memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
2561 	off += sizeof(id->mn);
2562 	memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
2563 }
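
/*
 * Example (illustrative, with a hypothetical device): a controller with
 * VID/SSVID 0x8086 and no usable SUBNQN ends up with a generated NQN of
 * the form:
 *
 *	nqn.2014.08.org.nvmexpress:80868086<sn><mn>
 *
 * where <sn> and <mn> are the space-padded 20-byte serial number and
 * 40-byte model number fields copied verbatim from Identify Controller.
 */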
2564 
2565 static void nvme_release_subsystem(struct device *dev)
2566 {
2567 	struct nvme_subsystem *subsys =
2568 		container_of(dev, struct nvme_subsystem, dev);
2569 
2570 	if (subsys->instance >= 0)
2571 		ida_simple_remove(&nvme_instance_ida, subsys->instance);
2572 	kfree(subsys);
2573 }
2574 
2575 static void nvme_destroy_subsystem(struct kref *ref)
2576 {
2577 	struct nvme_subsystem *subsys =
2578 			container_of(ref, struct nvme_subsystem, ref);
2579 
2580 	mutex_lock(&nvme_subsystems_lock);
2581 	list_del(&subsys->entry);
2582 	mutex_unlock(&nvme_subsystems_lock);
2583 
2584 	ida_destroy(&subsys->ns_ida);
2585 	device_del(&subsys->dev);
2586 	put_device(&subsys->dev);
2587 }
2588 
2589 static void nvme_put_subsystem(struct nvme_subsystem *subsys)
2590 {
2591 	kref_put(&subsys->ref, nvme_destroy_subsystem);
2592 }
2593 
2594 static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
2595 {
2596 	struct nvme_subsystem *subsys;
2597 
2598 	lockdep_assert_held(&nvme_subsystems_lock);
2599 
2600 	/*
2601 	 * Fail matches for discovery subsystems. This results
2602 	 * in each discovery controller being bound to a unique subsystem.
2603 	 * This avoids issues with validating controller values
2604 	 * that can only be true when there is a single unique subsystem.
2605 	 * There may be multiple and completely independent entities
2606 	 * that provide discovery controllers.
2607 	 */
2608 	if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
2609 		return NULL;
2610 
2611 	list_for_each_entry(subsys, &nvme_subsystems, entry) {
2612 		if (strcmp(subsys->subnqn, subsysnqn))
2613 			continue;
2614 		if (!kref_get_unless_zero(&subsys->ref))
2615 			continue;
2616 		return subsys;
2617 	}
2618 
2619 	return NULL;
2620 }
2621 
2622 #define SUBSYS_ATTR_RO(_name, _mode, _show)			\
2623 	struct device_attribute subsys_attr_##_name = \
2624 		__ATTR(_name, _mode, _show, NULL)
2625 
2626 static ssize_t nvme_subsys_show_nqn(struct device *dev,
2627 				    struct device_attribute *attr,
2628 				    char *buf)
2629 {
2630 	struct nvme_subsystem *subsys =
2631 		container_of(dev, struct nvme_subsystem, dev);
2632 
2633 	return sysfs_emit(buf, "%s\n", subsys->subnqn);
2634 }
2635 static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
2636 
2637 static ssize_t nvme_subsys_show_type(struct device *dev,
2638 				    struct device_attribute *attr,
2639 				    char *buf)
2640 {
2641 	struct nvme_subsystem *subsys =
2642 		container_of(dev, struct nvme_subsystem, dev);
2643 
2644 	switch (subsys->subtype) {
2645 	case NVME_NQN_DISC:
2646 		return sysfs_emit(buf, "discovery\n");
2647 	case NVME_NQN_NVME:
2648 		return sysfs_emit(buf, "nvm\n");
2649 	default:
2650 		return sysfs_emit(buf, "reserved\n");
2651 	}
2652 }
2653 static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);
2654 
2655 #define nvme_subsys_show_str_function(field)				\
2656 static ssize_t subsys_##field##_show(struct device *dev,		\
2657 			    struct device_attribute *attr, char *buf)	\
2658 {									\
2659 	struct nvme_subsystem *subsys =					\
2660 		container_of(dev, struct nvme_subsystem, dev);		\
2661 	return sysfs_emit(buf, "%.*s\n",				\
2662 			   (int)sizeof(subsys->field), subsys->field);	\
2663 }									\
2664 static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);
2665 
2666 nvme_subsys_show_str_function(model);
2667 nvme_subsys_show_str_function(serial);
2668 nvme_subsys_show_str_function(firmware_rev);
2669 
2670 static struct attribute *nvme_subsys_attrs[] = {
2671 	&subsys_attr_model.attr,
2672 	&subsys_attr_serial.attr,
2673 	&subsys_attr_firmware_rev.attr,
2674 	&subsys_attr_subsysnqn.attr,
2675 	&subsys_attr_subsystype.attr,
2676 #ifdef CONFIG_NVME_MULTIPATH
2677 	&subsys_attr_iopolicy.attr,
2678 #endif
2679 	NULL,
2680 };
2681 
2682 static const struct attribute_group nvme_subsys_attrs_group = {
2683 	.attrs = nvme_subsys_attrs,
2684 };
2685 
2686 static const struct attribute_group *nvme_subsys_attrs_groups[] = {
2687 	&nvme_subsys_attrs_group,
2688 	NULL,
2689 };
2690 
2691 static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
2692 {
2693 	return ctrl->opts && ctrl->opts->discovery_nqn;
2694 }
2695 
2696 static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
2697 		struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2698 {
2699 	struct nvme_ctrl *tmp;
2700 
2701 	lockdep_assert_held(&nvme_subsystems_lock);
2702 
2703 	list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
2704 		if (nvme_state_terminal(tmp))
2705 			continue;
2706 
2707 		if (tmp->cntlid == ctrl->cntlid) {
2708 			dev_err(ctrl->device,
2709 				"Duplicate cntlid %u with %s, subsys %s, rejecting\n",
2710 				ctrl->cntlid, dev_name(tmp->device),
2711 				subsys->subnqn);
2712 			return false;
2713 		}
2714 
2715 		if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
2716 		    nvme_discovery_ctrl(ctrl))
2717 			continue;
2718 
2719 		dev_err(ctrl->device,
2720 			"Subsystem does not support multiple controllers\n");
2721 		return false;
2722 	}
2723 
2724 	return true;
2725 }
2726 
2727 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2728 {
2729 	struct nvme_subsystem *subsys, *found;
2730 	int ret;
2731 
2732 	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
2733 	if (!subsys)
2734 		return -ENOMEM;
2735 
2736 	subsys->instance = -1;
2737 	mutex_init(&subsys->lock);
2738 	kref_init(&subsys->ref);
2739 	INIT_LIST_HEAD(&subsys->ctrls);
2740 	INIT_LIST_HEAD(&subsys->nsheads);
2741 	nvme_init_subnqn(subsys, ctrl, id);
2742 	memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
2743 	memcpy(subsys->model, id->mn, sizeof(subsys->model));
2744 	memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
2745 	subsys->vendor_id = le16_to_cpu(id->vid);
2746 	subsys->cmic = id->cmic;
2747 
2748 	/* Versions prior to 1.4 don't necessarily report a valid type */
2749 	if (id->cntrltype == NVME_CTRL_DISC ||
2750 	    !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME))
2751 		subsys->subtype = NVME_NQN_DISC;
2752 	else
2753 		subsys->subtype = NVME_NQN_NVME;
2754 
2755 	if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) {
2756 		dev_err(ctrl->device,
2757 			"Subsystem %s is not a discovery controller\n",
2758 			subsys->subnqn);
2759 		kfree(subsys);
2760 		return -EINVAL;
2761 	}
2762 	subsys->awupf = le16_to_cpu(id->awupf);
2763 	nvme_mpath_default_iopolicy(subsys);
2764 
2765 	subsys->dev.class = nvme_subsys_class;
2766 	subsys->dev.release = nvme_release_subsystem;
2767 	subsys->dev.groups = nvme_subsys_attrs_groups;
2768 	dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
2769 	device_initialize(&subsys->dev);
2770 
2771 	mutex_lock(&nvme_subsystems_lock);
2772 	found = __nvme_find_get_subsystem(subsys->subnqn);
2773 	if (found) {
2774 		put_device(&subsys->dev);
2775 		subsys = found;
2776 
2777 		if (!nvme_validate_cntlid(subsys, ctrl, id)) {
2778 			ret = -EINVAL;
2779 			goto out_put_subsystem;
2780 		}
2781 	} else {
2782 		ret = device_add(&subsys->dev);
2783 		if (ret) {
2784 			dev_err(ctrl->device,
2785 				"failed to register subsystem device.\n");
2786 			put_device(&subsys->dev);
2787 			goto out_unlock;
2788 		}
2789 		ida_init(&subsys->ns_ida);
2790 		list_add_tail(&subsys->entry, &nvme_subsystems);
2791 	}
2792 
2793 	ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
2794 				dev_name(ctrl->device));
2795 	if (ret) {
2796 		dev_err(ctrl->device,
2797 			"failed to create sysfs link from subsystem.\n");
2798 		goto out_put_subsystem;
2799 	}
2800 
2801 	if (!found)
2802 		subsys->instance = ctrl->instance;
2803 	ctrl->subsys = subsys;
2804 	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
2805 	mutex_unlock(&nvme_subsystems_lock);
2806 	return 0;
2807 
2808 out_put_subsystem:
2809 	nvme_put_subsystem(subsys);
2810 out_unlock:
2811 	mutex_unlock(&nvme_subsystems_lock);
2812 	return ret;
2813 }
2814 
2815 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
2816 		void *log, size_t size, u64 offset)
2817 {
2818 	struct nvme_command c = { };
2819 	u32 dwlen = nvme_bytes_to_numd(size);
2820 
2821 	c.get_log_page.opcode = nvme_admin_get_log_page;
2822 	c.get_log_page.nsid = cpu_to_le32(nsid);
2823 	c.get_log_page.lid = log_page;
2824 	c.get_log_page.lsp = lsp;
2825 	c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
2826 	c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
2827 	c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
2828 	c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
2829 	c.get_log_page.csi = csi;
2830 
2831 	return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
2832 }
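
/*
 * Worked example (illustrative): the dword count is zero's based, so
 * fetching a 1 MiB log gives dwlen = (1048576 / 4) - 1 = 0x3ffff, which
 * is split above into numdl = 0xffff and numdu = 0x3.
 */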
2833 
2834 static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
2835 				struct nvme_effects_log **log)
2836 {
2837 	struct nvme_effects_log	*cel = xa_load(&ctrl->cels, csi);
2838 	int ret;
2839 
2840 	if (cel)
2841 		goto out;
2842 
2843 	cel = kzalloc(sizeof(*cel), GFP_KERNEL);
2844 	if (!cel)
2845 		return -ENOMEM;
2846 
2847 	ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
2848 			cel, sizeof(*cel), 0);
2849 	if (ret) {
2850 		kfree(cel);
2851 		return ret;
2852 	}
2853 
2854 	xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
2855 out:
2856 	*log = cel;
2857 	return 0;
2858 }
2859 
2860 static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units)
2861 {
2862 	u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val;
2863 
2864 	if (check_shl_overflow(1U, units + page_shift - 9, &val))
2865 		return UINT_MAX;
2866 	return val;
2867 }
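
/*
 * Worked example (illustrative): with CAP.MPSMIN = 0 the minimum device
 * page is 4 KiB (page_shift = 12), so a units value of 5 (e.g. from
 * MDTS or WZSL) means 2^5 * 4 KiB = 128 KiB, i.e.
 * 1 << (5 + 12 - 9) = 256 sectors of 512 bytes.
 */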
2868 
2869 static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
2870 {
2871 	struct nvme_command c = { };
2872 	struct nvme_id_ctrl_nvm *id;
2873 	int ret;
2874 
2875 	if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
2876 		ctrl->max_discard_sectors = UINT_MAX;
2877 		ctrl->max_discard_segments = NVME_DSM_MAX_RANGES;
2878 	} else {
2879 		ctrl->max_discard_sectors = 0;
2880 		ctrl->max_discard_segments = 0;
2881 	}
2882 
2883 	/*
2884 	 * Even though the NVMe spec explicitly states that MDTS is not
2885 	 * applicable to Write Zeroes, we are cautious and limit the size to
2886 	 * the controller's max_hw_sectors value, which is based on the MDTS
2887 	 * field and possibly other limiting factors.
2888 	 */
2889 	if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
2890 	    !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
2891 		ctrl->max_zeroes_sectors = ctrl->max_hw_sectors;
2892 	else
2893 		ctrl->max_zeroes_sectors = 0;
2894 
2895 	if (nvme_ctrl_limited_cns(ctrl))
2896 		return 0;
2897 
2898 	id = kzalloc(sizeof(*id), GFP_KERNEL);
2899 	if (!id)
2900 		return 0;
2901 
2902 	c.identify.opcode = nvme_admin_identify;
2903 	c.identify.cns = NVME_ID_CNS_CS_CTRL;
2904 	c.identify.csi = NVME_CSI_NVM;
2905 
2906 	ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
2907 	if (ret)
2908 		goto free_data;
2909 
2910 	if (id->dmrl)
2911 		ctrl->max_discard_segments = id->dmrl;
2912 	if (id->dmrsl)
2913 		ctrl->max_discard_sectors = le32_to_cpu(id->dmrsl);
2914 	if (id->wzsl)
2915 		ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);
2916 
2917 free_data:
2918 	kfree(id);
2919 	return ret;
2920 }
2921 
2922 static int nvme_init_identify(struct nvme_ctrl *ctrl)
2923 {
2924 	struct nvme_id_ctrl *id;
2925 	u32 max_hw_sectors;
2926 	bool prev_apst_enabled;
2927 	int ret;
2928 
2929 	ret = nvme_identify_ctrl(ctrl, &id);
2930 	if (ret) {
2931 		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
2932 		return -EIO;
2933 	}
2934 
2935 	if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
2936 		ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
2937 		if (ret < 0)
2938 			goto out_free;
2939 	}
2940 
2941 	if (!(ctrl->ops->flags & NVME_F_FABRICS))
2942 		ctrl->cntlid = le16_to_cpu(id->cntlid);
2943 
2944 	if (!ctrl->identified) {
2945 		unsigned int i;
2946 
2947 		ret = nvme_init_subsystem(ctrl, id);
2948 		if (ret)
2949 			goto out_free;
2950 
2951 		/*
2952 		 * Check for quirks.  Quirk can depend on firmware version,
2953 		 * so, in principle, the set of quirks present can change
2954 		 * across a reset.  As a possible future enhancement, we
2955 		 * could re-scan for quirks every time we reinitialize
2956 		 * the device, but we'd have to make sure that the driver
2957 		 * behaves intelligently if the quirks change.
2958 		 */
2959 		for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
2960 			if (quirk_matches(id, &core_quirks[i]))
2961 				ctrl->quirks |= core_quirks[i].quirks;
2962 		}
2963 	}
2964 
2965 	if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
2966 		dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
2967 		ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
2968 	}
2969 
2970 	ctrl->crdt[0] = le16_to_cpu(id->crdt1);
2971 	ctrl->crdt[1] = le16_to_cpu(id->crdt2);
2972 	ctrl->crdt[2] = le16_to_cpu(id->crdt3);
2973 
2974 	ctrl->oacs = le16_to_cpu(id->oacs);
2975 	ctrl->oncs = le16_to_cpu(id->oncs);
2976 	ctrl->mtfa = le16_to_cpu(id->mtfa);
2977 	ctrl->oaes = le32_to_cpu(id->oaes);
2978 	ctrl->wctemp = le16_to_cpu(id->wctemp);
2979 	ctrl->cctemp = le16_to_cpu(id->cctemp);
2980 
2981 	atomic_set(&ctrl->abort_limit, id->acl + 1);
2982 	ctrl->vwc = id->vwc;
2983 	if (id->mdts)
2984 		max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts);
2985 	else
2986 		max_hw_sectors = UINT_MAX;
2987 	ctrl->max_hw_sectors =
2988 		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
2989 
2990 	nvme_set_queue_limits(ctrl, ctrl->admin_q);
2991 	ctrl->sgls = le32_to_cpu(id->sgls);
2992 	ctrl->kas = le16_to_cpu(id->kas);
2993 	ctrl->max_namespaces = le32_to_cpu(id->mnan);
2994 	ctrl->ctratt = le32_to_cpu(id->ctratt);
2995 
2996 	if (id->rtd3e) {
2997 		/* us -> s */
2998 		u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC;
2999 
3000 		ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
3001 						 shutdown_timeout, 60);
3002 
3003 		if (ctrl->shutdown_timeout != shutdown_timeout)
3004 			dev_info(ctrl->device,
3005 				 "Shutdown timeout set to %u seconds\n",
3006 				 ctrl->shutdown_timeout);
3007 	} else
3008 		ctrl->shutdown_timeout = shutdown_timeout;
3009 
3010 	ctrl->npss = id->npss;
3011 	ctrl->apsta = id->apsta;
3012 	prev_apst_enabled = ctrl->apst_enabled;
3013 	if (ctrl->quirks & NVME_QUIRK_NO_APST) {
3014 		if (force_apst && id->apsta) {
3015 			dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
3016 			ctrl->apst_enabled = true;
3017 		} else {
3018 			ctrl->apst_enabled = false;
3019 		}
3020 	} else {
3021 		ctrl->apst_enabled = id->apsta;
3022 	}
3023 	memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
3024 
3025 	if (ctrl->ops->flags & NVME_F_FABRICS) {
3026 		ctrl->icdoff = le16_to_cpu(id->icdoff);
3027 		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
3028 		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
3029 		ctrl->maxcmd = le16_to_cpu(id->maxcmd);
3030 
3031 		/*
3032 		 * In fabrics we need to verify that the cntlid matches the
3033 		 * one returned by the admin Connect command.
3034 		 */
3035 		if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
3036 			dev_err(ctrl->device,
3037 				"Mismatching cntlid: Connect %u vs Identify "
3038 				"%u, rejecting\n",
3039 				ctrl->cntlid, le16_to_cpu(id->cntlid));
3040 			ret = -EINVAL;
3041 			goto out_free;
3042 		}
3043 
3044 		if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
3045 			dev_err(ctrl->device,
3046 				"keep-alive support is mandatory for fabrics\n");
3047 			ret = -EINVAL;
3048 			goto out_free;
3049 		}
3050 	} else {
3051 		ctrl->hmpre = le32_to_cpu(id->hmpre);
3052 		ctrl->hmmin = le32_to_cpu(id->hmmin);
3053 		ctrl->hmminds = le32_to_cpu(id->hmminds);
3054 		ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
3055 	}
3056 
3057 	ret = nvme_mpath_init_identify(ctrl, id);
3058 	if (ret < 0)
3059 		goto out_free;
3060 
3061 	if (ctrl->apst_enabled && !prev_apst_enabled)
3062 		dev_pm_qos_expose_latency_tolerance(ctrl->device);
3063 	else if (!ctrl->apst_enabled && prev_apst_enabled)
3064 		dev_pm_qos_hide_latency_tolerance(ctrl->device);
3065 
3066 out_free:
3067 	kfree(id);
3068 	return ret;
3069 }
3070 
3071 /*
3072  * Initialize the cached copies of the Identify data and various controller
3073  * registers in our nvme_ctrl structure.  This should be called as soon as
3074  * the admin queue is fully up and running.
3075  */
3076 int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl)
3077 {
3078 	int ret;
3079 
3080 	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
3081 	if (ret) {
3082 		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
3083 		return ret;
3084 	}
3085 
3086 	ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
3087 
3088 	if (ctrl->vs >= NVME_VS(1, 1, 0))
3089 		ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
3090 
3091 	ret = nvme_init_identify(ctrl);
3092 	if (ret)
3093 		return ret;
3094 
3095 	ret = nvme_init_non_mdts_limits(ctrl);
3096 	if (ret < 0)
3097 		return ret;
3098 
3099 	ret = nvme_configure_apst(ctrl);
3100 	if (ret < 0)
3101 		return ret;
3102 
3103 	ret = nvme_configure_timestamp(ctrl);
3104 	if (ret < 0)
3105 		return ret;
3106 
3107 	ret = nvme_configure_directives(ctrl);
3108 	if (ret < 0)
3109 		return ret;
3110 
3111 	ret = nvme_configure_acre(ctrl);
3112 	if (ret < 0)
3113 		return ret;
3114 
3115 	if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
3116 		ret = nvme_hwmon_init(ctrl);
3117 		if (ret < 0)
3118 			return ret;
3119 	}
3120 
3121 	ctrl->identified = true;
3122 
3123 	return 0;
3124 }
3125 EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish);
3126 
3127 static int nvme_dev_open(struct inode *inode, struct file *file)
3128 {
3129 	struct nvme_ctrl *ctrl =
3130 		container_of(inode->i_cdev, struct nvme_ctrl, cdev);
3131 
3132 	switch (ctrl->state) {
3133 	case NVME_CTRL_LIVE:
3134 		break;
3135 	default:
3136 		return -EWOULDBLOCK;
3137 	}
3138 
3139 	nvme_get_ctrl(ctrl);
3140 	if (!try_module_get(ctrl->ops->module)) {
3141 		nvme_put_ctrl(ctrl);
3142 		return -EINVAL;
3143 	}
3144 
3145 	file->private_data = ctrl;
3146 	return 0;
3147 }
3148 
3149 static int nvme_dev_release(struct inode *inode, struct file *file)
3150 {
3151 	struct nvme_ctrl *ctrl =
3152 		container_of(inode->i_cdev, struct nvme_ctrl, cdev);
3153 
3154 	module_put(ctrl->ops->module);
3155 	nvme_put_ctrl(ctrl);
3156 	return 0;
3157 }
3158 
3159 static const struct file_operations nvme_dev_fops = {
3160 	.owner		= THIS_MODULE,
3161 	.open		= nvme_dev_open,
3162 	.release	= nvme_dev_release,
3163 	.unlocked_ioctl	= nvme_dev_ioctl,
3164 	.compat_ioctl	= compat_ptr_ioctl,
3165 };
3166 
3167 static ssize_t nvme_sysfs_reset(struct device *dev,
3168 				struct device_attribute *attr, const char *buf,
3169 				size_t count)
3170 {
3171 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3172 	int ret;
3173 
3174 	ret = nvme_reset_ctrl_sync(ctrl);
3175 	if (ret < 0)
3176 		return ret;
3177 	return count;
3178 }
3179 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
3180 
3181 static ssize_t nvme_sysfs_rescan(struct device *dev,
3182 				struct device_attribute *attr, const char *buf,
3183 				size_t count)
3184 {
3185 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3186 
3187 	nvme_queue_scan(ctrl);
3188 	return count;
3189 }
3190 static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
3191 
3192 static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
3193 {
3194 	struct gendisk *disk = dev_to_disk(dev);
3195 
3196 	if (disk->fops == &nvme_bdev_ops)
3197 		return nvme_get_ns_from_dev(dev)->head;
3198 	else
3199 		return disk->private_data;
3200 }
3201 
3202 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
3203 		char *buf)
3204 {
3205 	struct nvme_ns_head *head = dev_to_ns_head(dev);
3206 	struct nvme_ns_ids *ids = &head->ids;
3207 	struct nvme_subsystem *subsys = head->subsys;
3208 	int serial_len = sizeof(subsys->serial);
3209 	int model_len = sizeof(subsys->model);
3210 
3211 	if (!uuid_is_null(&ids->uuid))
3212 		return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);
3213 
3214 	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3215 		return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);
3216 
3217 	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
3218 		return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);
3219 
3220 	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
3221 				  subsys->serial[serial_len - 1] == '\0'))
3222 		serial_len--;
3223 	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
3224 				 subsys->model[model_len - 1] == '\0'))
3225 		model_len--;
3226 
3227 	return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
3228 		serial_len, subsys->serial, model_len, subsys->model,
3229 		head->ns_id);
3230 }
3231 static DEVICE_ATTR_RO(wwid);
3232 
3233 static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
3234 		char *buf)
3235 {
3236 	return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
3237 }
3238 static DEVICE_ATTR_RO(nguid);
3239 
3240 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
3241 		char *buf)
3242 {
3243 	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
3244 
3245 	/* For backward compatibility, expose the NGUID to userspace if
3246 	 * we have no UUID set.
3247 	 */
3248 	if (uuid_is_null(&ids->uuid)) {
3249 		printk_ratelimited(KERN_WARNING
3250 				   "No UUID available providing old NGUID\n");
3251 		return sysfs_emit(buf, "%pU\n", ids->nguid);
3252 	}
3253 	return sysfs_emit(buf, "%pU\n", &ids->uuid);
3254 }
3255 static DEVICE_ATTR_RO(uuid);
3256 
3257 static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
3258 		char *buf)
3259 {
3260 	return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
3261 }
3262 static DEVICE_ATTR_RO(eui);
3263 
3264 static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
3265 		char *buf)
3266 {
3267 	return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
3268 }
3269 static DEVICE_ATTR_RO(nsid);
3270 
3271 static struct attribute *nvme_ns_id_attrs[] = {
3272 	&dev_attr_wwid.attr,
3273 	&dev_attr_uuid.attr,
3274 	&dev_attr_nguid.attr,
3275 	&dev_attr_eui.attr,
3276 	&dev_attr_nsid.attr,
3277 #ifdef CONFIG_NVME_MULTIPATH
3278 	&dev_attr_ana_grpid.attr,
3279 	&dev_attr_ana_state.attr,
3280 #endif
3281 	NULL,
3282 };
3283 
3284 static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
3285 		struct attribute *a, int n)
3286 {
3287 	struct device *dev = container_of(kobj, struct device, kobj);
3288 	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
3289 
3290 	if (a == &dev_attr_uuid.attr) {
3291 		if (uuid_is_null(&ids->uuid) &&
3292 		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3293 			return 0;
3294 	}
3295 	if (a == &dev_attr_nguid.attr) {
3296 		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3297 			return 0;
3298 	}
3299 	if (a == &dev_attr_eui.attr) {
3300 		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
3301 			return 0;
3302 	}
3303 #ifdef CONFIG_NVME_MULTIPATH
3304 	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
3305 		if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */
3306 			return 0;
3307 		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
3308 			return 0;
3309 	}
3310 #endif
3311 	return a->mode;
3312 }
3313 
3314 static const struct attribute_group nvme_ns_id_attr_group = {
3315 	.attrs		= nvme_ns_id_attrs,
3316 	.is_visible	= nvme_ns_id_attrs_are_visible,
3317 };
3318 
3319 const struct attribute_group *nvme_ns_id_attr_groups[] = {
3320 	&nvme_ns_id_attr_group,
3321 	NULL,
3322 };
3323 
3324 #define nvme_show_str_function(field)						\
3325 static ssize_t  field##_show(struct device *dev,				\
3326 			    struct device_attribute *attr, char *buf)		\
3327 {										\
3328         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
3329         return sysfs_emit(buf, "%.*s\n",					\
3330 		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field);		\
3331 }										\
3332 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
3333 
3334 nvme_show_str_function(model);
3335 nvme_show_str_function(serial);
3336 nvme_show_str_function(firmware_rev);
3337 
3338 #define nvme_show_int_function(field)						\
3339 static ssize_t  field##_show(struct device *dev,				\
3340 			    struct device_attribute *attr, char *buf)		\
3341 {										\
3342         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
3343         return sysfs_emit(buf, "%d\n", ctrl->field);				\
3344 }										\
3345 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
3346 
3347 nvme_show_int_function(cntlid);
3348 nvme_show_int_function(numa_node);
3349 nvme_show_int_function(queue_count);
3350 nvme_show_int_function(sqsize);
3351 nvme_show_int_function(kato);
3352 
3353 static ssize_t nvme_sysfs_delete(struct device *dev,
3354 				struct device_attribute *attr, const char *buf,
3355 				size_t count)
3356 {
3357 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3358 
3359 	if (device_remove_file_self(dev, attr))
3360 		nvme_delete_ctrl_sync(ctrl);
3361 	return count;
3362 }
3363 static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
3364 
3365 static ssize_t nvme_sysfs_show_transport(struct device *dev,
3366 					 struct device_attribute *attr,
3367 					 char *buf)
3368 {
3369 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3370 
3371 	return sysfs_emit(buf, "%s\n", ctrl->ops->name);
3372 }
3373 static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
3374 
3375 static ssize_t nvme_sysfs_show_state(struct device *dev,
3376 				     struct device_attribute *attr,
3377 				     char *buf)
3378 {
3379 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3380 	static const char *const state_name[] = {
3381 		[NVME_CTRL_NEW]		= "new",
3382 		[NVME_CTRL_LIVE]	= "live",
3383 		[NVME_CTRL_RESETTING]	= "resetting",
3384 		[NVME_CTRL_CONNECTING]	= "connecting",
3385 		[NVME_CTRL_DELETING]	= "deleting",
3386 		[NVME_CTRL_DELETING_NOIO] = "deleting (no IO)",
3387 		[NVME_CTRL_DEAD]	= "dead",
3388 	};
3389 
3390 	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
3391 	    state_name[ctrl->state])
3392 		return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);
3393 
3394 	return sysfs_emit(buf, "unknown state\n");
3395 }
3396 
3397 static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);
3398 
3399 static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
3400 					 struct device_attribute *attr,
3401 					 char *buf)
3402 {
3403 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3404 
3405 	return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
3406 }
3407 static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
3408 
3409 static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
3410 					struct device_attribute *attr,
3411 					char *buf)
3412 {
3413 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3414 
3415 	return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
3416 }
3417 static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);
3418 
3419 static ssize_t nvme_sysfs_show_hostid(struct device *dev,
3420 					struct device_attribute *attr,
3421 					char *buf)
3422 {
3423 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3424 
3425 	return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
3426 }
3427 static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);
3428 
3429 static ssize_t nvme_sysfs_show_address(struct device *dev,
3430 					 struct device_attribute *attr,
3431 					 char *buf)
3432 {
3433 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3434 
3435 	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
3436 }
3437 static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
3438 
3439 static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
3440 		struct device_attribute *attr, char *buf)
3441 {
3442 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3443 	struct nvmf_ctrl_options *opts = ctrl->opts;
3444 
3445 	if (ctrl->opts->max_reconnects == -1)
3446 		return sysfs_emit(buf, "off\n");
3447 	return sysfs_emit(buf, "%d\n",
3448 			  opts->max_reconnects * opts->reconnect_delay);
3449 }
3450 
3451 static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
3452 		struct device_attribute *attr, const char *buf, size_t count)
3453 {
3454 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3455 	struct nvmf_ctrl_options *opts = ctrl->opts;
3456 	int ctrl_loss_tmo, err;
3457 
3458 	err = kstrtoint(buf, 10, &ctrl_loss_tmo);
3459 	if (err)
3460 		return -EINVAL;
3461 
3462 	if (ctrl_loss_tmo < 0)
3463 		opts->max_reconnects = -1;
3464 	else
3465 		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
3466 						opts->reconnect_delay);
3467 	return count;
3468 }
3469 static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
3470 	nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);
3471 
3472 static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
3473 		struct device_attribute *attr, char *buf)
3474 {
3475 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3476 
3477 	if (ctrl->opts->reconnect_delay == -1)
3478 		return sysfs_emit(buf, "off\n");
3479 	return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
3480 }
3481 
3482 static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
3483 		struct device_attribute *attr, const char *buf, size_t count)
3484 {
3485 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3486 	unsigned int v;
3487 	int err;
3488 
3489 	err = kstrtou32(buf, 10, &v);
3490 	if (err)
3491 		return err;
3492 
3493 	ctrl->opts->reconnect_delay = v;
3494 	return count;
3495 }
3496 static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
3497 	nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);
3498 
3499 static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev,
3500 		struct device_attribute *attr, char *buf)
3501 {
3502 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3503 
3504 	if (ctrl->opts->fast_io_fail_tmo == -1)
3505 		return sysfs_emit(buf, "off\n");
3506 	return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo);
3507 }
3508 
3509 static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
3510 		struct device_attribute *attr, const char *buf, size_t count)
3511 {
3512 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3513 	struct nvmf_ctrl_options *opts = ctrl->opts;
3514 	int fast_io_fail_tmo, err;
3515 
3516 	err = kstrtoint(buf, 10, &fast_io_fail_tmo);
3517 	if (err)
3518 		return -EINVAL;
3519 
3520 	if (fast_io_fail_tmo < 0)
3521 		opts->fast_io_fail_tmo = -1;
3522 	else
3523 		opts->fast_io_fail_tmo = fast_io_fail_tmo;
3524 	return count;
3525 }
3526 static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
3527 	nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);
3528 
3529 static struct attribute *nvme_dev_attrs[] = {
3530 	&dev_attr_reset_controller.attr,
3531 	&dev_attr_rescan_controller.attr,
3532 	&dev_attr_model.attr,
3533 	&dev_attr_serial.attr,
3534 	&dev_attr_firmware_rev.attr,
3535 	&dev_attr_cntlid.attr,
3536 	&dev_attr_delete_controller.attr,
3537 	&dev_attr_transport.attr,
3538 	&dev_attr_subsysnqn.attr,
3539 	&dev_attr_address.attr,
3540 	&dev_attr_state.attr,
3541 	&dev_attr_numa_node.attr,
3542 	&dev_attr_queue_count.attr,
3543 	&dev_attr_sqsize.attr,
3544 	&dev_attr_hostnqn.attr,
3545 	&dev_attr_hostid.attr,
3546 	&dev_attr_ctrl_loss_tmo.attr,
3547 	&dev_attr_reconnect_delay.attr,
3548 	&dev_attr_fast_io_fail_tmo.attr,
3549 	&dev_attr_kato.attr,
3550 	NULL
3551 };
3552 
3553 static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
3554 		struct attribute *a, int n)
3555 {
3556 	struct device *dev = container_of(kobj, struct device, kobj);
3557 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3558 
3559 	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
3560 		return 0;
3561 	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
3562 		return 0;
3563 	if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
3564 		return 0;
3565 	if (a == &dev_attr_hostid.attr && !ctrl->opts)
3566 		return 0;
3567 	if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
3568 		return 0;
3569 	if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
3570 		return 0;
3571 	if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
3572 		return 0;
3573 
3574 	return a->mode;
3575 }
3576 
3577 static const struct attribute_group nvme_dev_attrs_group = {
3578 	.attrs		= nvme_dev_attrs,
3579 	.is_visible	= nvme_dev_attrs_are_visible,
3580 };
3581 
3582 static const struct attribute_group *nvme_dev_attr_groups[] = {
3583 	&nvme_dev_attrs_group,
3584 	NULL,
3585 };
3586 
3587 static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
3588 		unsigned nsid)
3589 {
3590 	struct nvme_ns_head *h;
3591 
3592 	lockdep_assert_held(&subsys->lock);
3593 
3594 	list_for_each_entry(h, &subsys->nsheads, entry) {
3595 		if (h->ns_id != nsid)
3596 			continue;
3597 		if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
3598 			return h;
3599 	}
3600 
3601 	return NULL;
3602 }
3603 
3604 static int __nvme_check_ids(struct nvme_subsystem *subsys,
3605 		struct nvme_ns_head *new)
3606 {
3607 	struct nvme_ns_head *h;
3608 
3609 	lockdep_assert_held(&subsys->lock);
3610 
3611 	list_for_each_entry(h, &subsys->nsheads, entry) {
3612 		if (nvme_ns_ids_valid(&new->ids) &&
3613 		    nvme_ns_ids_equal(&new->ids, &h->ids))
3614 			return -EINVAL;
3615 	}
3616 
3617 	return 0;
3618 }
3619 
3620 static void nvme_cdev_rel(struct device *dev)
3621 {
3622 	ida_simple_remove(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
3623 }
3624 
3625 void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device)
3626 {
3627 	cdev_device_del(cdev, cdev_device);
3628 	put_device(cdev_device);
3629 }
3630 
3631 int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
3632 		const struct file_operations *fops, struct module *owner)
3633 {
3634 	int minor, ret;
3635 
3636 	minor = ida_simple_get(&nvme_ns_chr_minor_ida, 0, 0, GFP_KERNEL);
3637 	if (minor < 0)
3638 		return minor;
3639 	cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
3640 	cdev_device->class = nvme_ns_chr_class;
3641 	cdev_device->release = nvme_cdev_rel;
3642 	device_initialize(cdev_device);
3643 	cdev_init(cdev, fops);
3644 	cdev->owner = owner;
3645 	ret = cdev_device_add(cdev, cdev_device);
3646 	if (ret)
3647 		put_device(cdev_device);
3648 
3649 	return ret;
3650 }
3651 
3652 static int nvme_ns_chr_open(struct inode *inode, struct file *file)
3653 {
3654 	return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev));
3655 }
3656 
3657 static int nvme_ns_chr_release(struct inode *inode, struct file *file)
3658 {
3659 	nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev));
3660 	return 0;
3661 }
3662 
3663 static const struct file_operations nvme_ns_chr_fops = {
3664 	.owner		= THIS_MODULE,
3665 	.open		= nvme_ns_chr_open,
3666 	.release	= nvme_ns_chr_release,
3667 	.unlocked_ioctl	= nvme_ns_chr_ioctl,
3668 	.compat_ioctl	= compat_ptr_ioctl,
3669 };
3670 
3671 static int nvme_add_ns_cdev(struct nvme_ns *ns)
3672 {
3673 	int ret;
3674 
3675 	ns->cdev_device.parent = ns->ctrl->device;
3676 	ret = dev_set_name(&ns->cdev_device, "ng%dn%d",
3677 			   ns->ctrl->instance, ns->head->instance);
3678 	if (ret)
3679 		return ret;
3680 
3681 	return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
3682 			     ns->ctrl->ops->module);
3683 }
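/*
 * Illustrative only, not part of the driver: the char device registered
 * above appears as /dev/ng<ctrl>n<ns>.  A minimal userspace sketch,
 * assuming a node /dev/ng0n1 exists, using NVME_IOCTL_ID from
 * <linux/nvme_ioctl.h> to read back the namespace ID:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/nvme_ioctl.h>
 *
 *	static int read_nsid(void)
 *	{
 *		int fd = open("/dev/ng0n1", O_RDONLY);
 *		int nsid;
 *
 *		if (fd < 0)
 *			return -1;
 *		nsid = ioctl(fd, NVME_IOCTL_ID);
 *		close(fd);
 *		return nsid;
 *	}
 */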
3684 
3685 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
3686 		unsigned nsid, struct nvme_ns_ids *ids)
3687 {
3688 	struct nvme_ns_head *head;
3689 	size_t size = sizeof(*head);
3690 	int ret = -ENOMEM;
3691 
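	/*
	 * With multipath enabled the head is over-allocated so that the
	 * per-NUMA-node current-path array (nvme_ns_head.current_path[])
	 * lands directly behind the structure.
	 */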
3692 #ifdef CONFIG_NVME_MULTIPATH
3693 	size += num_possible_nodes() * sizeof(struct nvme_ns *);
3694 #endif
3695 
3696 	head = kzalloc(size, GFP_KERNEL);
3697 	if (!head)
3698 		goto out;
3699 	ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
3700 	if (ret < 0)
3701 		goto out_free_head;
3702 	head->instance = ret;
3703 	INIT_LIST_HEAD(&head->list);
3704 	ret = init_srcu_struct(&head->srcu);
3705 	if (ret)
3706 		goto out_ida_remove;
3707 	head->subsys = ctrl->subsys;
3708 	head->ns_id = nsid;
3709 	head->ids = *ids;
3710 	kref_init(&head->ref);
3711 
3712 	ret = __nvme_check_ids(ctrl->subsys, head);
3713 	if (ret) {
3714 		dev_err(ctrl->device,
3715 			"duplicate IDs for nsid %d\n", nsid);
3716 		goto out_cleanup_srcu;
3717 	}
3718 
3719 	if (head->ids.csi) {
3720 		ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
3721 		if (ret)
3722 			goto out_cleanup_srcu;
3723 	} else
3724 		head->effects = ctrl->effects;
3725 
3726 	ret = nvme_mpath_alloc_disk(ctrl, head);
3727 	if (ret)
3728 		goto out_cleanup_srcu;
3729 
3730 	list_add_tail(&head->entry, &ctrl->subsys->nsheads);
3731 
3732 	kref_get(&ctrl->subsys->ref);
3733 
3734 	return head;
3735 out_cleanup_srcu:
3736 	cleanup_srcu_struct(&head->srcu);
3737 out_ida_remove:
3738 	ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
3739 out_free_head:
3740 	kfree(head);
3741 out:
3742 	if (ret > 0)
3743 		ret = blk_status_to_errno(nvme_error_status(ret));
3744 	return ERR_PTR(ret);
3745 }
3746 
3747 static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
3748 		struct nvme_ns_ids *ids, bool is_shared)
3749 {
3750 	struct nvme_ctrl *ctrl = ns->ctrl;
3751 	struct nvme_ns_head *head = NULL;
3752 	int ret = 0;
3753 
3754 	mutex_lock(&ctrl->subsys->lock);
3755 	head = nvme_find_ns_head(ctrl->subsys, nsid);
3756 	if (!head) {
3757 		head = nvme_alloc_ns_head(ctrl, nsid, ids);
3758 		if (IS_ERR(head)) {
3759 			ret = PTR_ERR(head);
3760 			goto out_unlock;
3761 		}
3762 		head->shared = is_shared;
3763 	} else {
3764 		ret = -EINVAL;
3765 		if (!is_shared || !head->shared) {
3766 			dev_err(ctrl->device,
3767 				"Duplicate unshared namespace %d\n", nsid);
3768 			goto out_put_ns_head;
3769 		}
3770 		if (!nvme_ns_ids_equal(&head->ids, ids)) {
3771 			dev_err(ctrl->device,
3772 				"IDs don't match for shared namespace %d\n",
3773 					nsid);
3774 			goto out_put_ns_head;
3775 		}
3776 	}
3777 
3778 	list_add_tail_rcu(&ns->siblings, &head->list);
3779 	ns->head = head;
3780 	mutex_unlock(&ctrl->subsys->lock);
3781 	return 0;
3782 
3783 out_put_ns_head:
3784 	nvme_put_ns_head(head);
3785 out_unlock:
3786 	mutex_unlock(&ctrl->subsys->lock);
3787 	return ret;
3788 }
3789 
3790 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3791 {
3792 	struct nvme_ns *ns, *ret = NULL;
3793 
3794 	down_read(&ctrl->namespaces_rwsem);
3795 	list_for_each_entry(ns, &ctrl->namespaces, list) {
3796 		if (ns->head->ns_id == nsid) {
3797 			if (!nvme_get_ns(ns))
3798 				continue;
3799 			ret = ns;
3800 			break;
3801 		}
3802 		if (ns->head->ns_id > nsid)
3803 			break;
3804 	}
3805 	up_read(&ctrl->namespaces_rwsem);
3806 	return ret;
3807 }
3808 EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
3809 
3810 /*
3811  * Add the namespace to the controller list while keeping the list ordered.
3812  */
3813 static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
3814 {
3815 	struct nvme_ns *tmp;
3816 
3817 	list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
3818 		if (tmp->head->ns_id < ns->head->ns_id) {
3819 			list_add(&ns->list, &tmp->list);
3820 			return;
3821 		}
3822 	}
3823 	list_add(&ns->list, &ns->ctrl->namespaces);
3824 }
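/*
 * Worked example (editorial note): with ctrl->namespaces holding NSIDs
 * [1, 2, 5], inserting NSID 4 walks the list in reverse, stops at 2
 * (the first entry with a smaller NSID) and links the new namespace
 * after it, yielding [1, 2, 4, 5].  If no smaller NSID exists, the
 * namespace becomes the new head of the list.
 */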
3825 
3826 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
3827 		struct nvme_ns_ids *ids)
3828 {
3829 	struct nvme_ns *ns;
3830 	struct gendisk *disk;
3831 	struct nvme_id_ns *id;
3832 	int node = ctrl->numa_node;
3833 
3834 	if (nvme_identify_ns(ctrl, nsid, ids, &id))
3835 		return;
3836 
3837 	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
3838 	if (!ns)
3839 		goto out_free_id;
3840 
3841 	disk = blk_mq_alloc_disk(ctrl->tagset, ns);
3842 	if (IS_ERR(disk))
3843 		goto out_free_ns;
3844 	disk->fops = &nvme_bdev_ops;
3845 	disk->private_data = ns;
3846 
3847 	ns->disk = disk;
3848 	ns->queue = disk->queue;
3849 
3850 	if (ctrl->opts && ctrl->opts->data_digest)
3851 		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
3852 
3853 	blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
3854 	if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
3855 		blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
3856 
3857 	ns->ctrl = ctrl;
3858 	kref_init(&ns->kref);
3859 
3860 	if (nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED))
3861 		goto out_cleanup_disk;
3862 
3863 	/*
3864 	 * Without the multipath code enabled, multiple controllers per
3865 	 * subsystem are visible as devices and thus we cannot use the
3866 	 * subsystem instance.
3867 	 */
3868 	if (!nvme_mpath_set_disk_name(ns, disk->disk_name, &disk->flags))
3869 		sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
3870 			ns->head->instance);
3871 
3872 	if (nvme_update_ns_info(ns, id))
3873 		goto out_unlink_ns;
3874 
3875 	down_write(&ctrl->namespaces_rwsem);
3876 	nvme_ns_add_to_ctrl_list(ns);
3877 	up_write(&ctrl->namespaces_rwsem);
3878 	nvme_get_ctrl(ctrl);
3879 
3880 	if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups))
3881 		goto out_cleanup_ns_from_list;
3882 
3883 	if (!nvme_ns_head_multipath(ns->head))
3884 		nvme_add_ns_cdev(ns);
3885 
3886 	nvme_mpath_add_disk(ns, id);
3887 	nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
3888 	kfree(id);
3889 
3890 	return;
3891 
3892  out_cleanup_ns_from_list:
3893 	nvme_put_ctrl(ctrl);
3894 	down_write(&ctrl->namespaces_rwsem);
3895 	list_del_init(&ns->list);
3896 	up_write(&ctrl->namespaces_rwsem);
3897  out_unlink_ns:
3898 	mutex_lock(&ctrl->subsys->lock);
3899 	list_del_rcu(&ns->siblings);
3900 	if (list_empty(&ns->head->list))
3901 		list_del_init(&ns->head->entry);
3902 	mutex_unlock(&ctrl->subsys->lock);
3903 	nvme_put_ns_head(ns->head);
3904  out_cleanup_disk:
3905 	blk_cleanup_disk(disk);
3906  out_free_ns:
3907 	kfree(ns);
3908  out_free_id:
3909 	kfree(id);
3910 }
3911 
3912 static void nvme_ns_remove(struct nvme_ns *ns)
3913 {
3914 	bool last_path = false;
3915 
3916 	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
3917 		return;
3918 
3919 	clear_bit(NVME_NS_READY, &ns->flags);
3920 	set_capacity(ns->disk, 0);
3921 	nvme_fault_inject_fini(&ns->fault_inject);
3922 
3923 	mutex_lock(&ns->ctrl->subsys->lock);
3924 	list_del_rcu(&ns->siblings);
3925 	if (list_empty(&ns->head->list)) {
3926 		list_del_init(&ns->head->entry);
3927 		last_path = true;
3928 	}
3929 	mutex_unlock(&ns->ctrl->subsys->lock);
3930 
3931 	/* guarantee the namespace is no longer reachable via head->list */
3932 	synchronize_rcu();
3933 
3934 	/* wait for concurrent submissions */
3935 	if (nvme_mpath_clear_current_path(ns))
3936 		synchronize_srcu(&ns->head->srcu);
3937 
3938 	if (!nvme_ns_head_multipath(ns->head))
3939 		nvme_cdev_del(&ns->cdev, &ns->cdev_device);
3940 	del_gendisk(ns->disk);
3941 	blk_cleanup_queue(ns->queue);
3942 
3943 	down_write(&ns->ctrl->namespaces_rwsem);
3944 	list_del_init(&ns->list);
3945 	up_write(&ns->ctrl->namespaces_rwsem);
3946 
3947 	if (last_path)
3948 		nvme_mpath_shutdown_disk(ns->head);
3949 	nvme_put_ns(ns);
3950 }
3951 
3952 static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
3953 {
3954 	struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid);
3955 
3956 	if (ns) {
3957 		nvme_ns_remove(ns);
3958 		nvme_put_ns(ns);
3959 	}
3960 }
3961 
3962 static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids)
3963 {
3964 	struct nvme_id_ns *id;
3965 	int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
3966 
3967 	if (test_bit(NVME_NS_DEAD, &ns->flags))
3968 		goto out;
3969 
3970 	ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, ids, &id);
3971 	if (ret)
3972 		goto out;
3973 
3974 	ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
3975 	if (!nvme_ns_ids_equal(&ns->head->ids, ids)) {
3976 		dev_err(ns->ctrl->device,
3977 			"identifiers changed for nsid %d\n", ns->head->ns_id);
3978 		goto out_free_id;
3979 	}
3980 
3981 	ret = nvme_update_ns_info(ns, id);
3982 
3983 out_free_id:
3984 	kfree(id);
3985 out:
3986 	/*
3987 	 * Only remove the namespace if we got a fatal error back from the
3988 	 * device, otherwise ignore the error and just move on.
3989 	 *
3990 	 * TODO: we should probably schedule a delayed retry here.
3991 	 */
3992 	if (ret > 0 && (ret & NVME_SC_DNR))
3993 		nvme_ns_remove(ns);
3994 }
3995 
3996 static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3997 {
3998 	struct nvme_ns_ids ids = { };
3999 	struct nvme_ns *ns;
4000 
4001 	if (nvme_identify_ns_descs(ctrl, nsid, &ids))
4002 		return;
4003 
4004 	ns = nvme_find_get_ns(ctrl, nsid);
4005 	if (ns) {
4006 		nvme_validate_ns(ns, &ids);
4007 		nvme_put_ns(ns);
4008 		return;
4009 	}
4010 
4011 	switch (ids.csi) {
4012 	case NVME_CSI_NVM:
4013 		nvme_alloc_ns(ctrl, nsid, &ids);
4014 		break;
4015 	case NVME_CSI_ZNS:
4016 		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
4017 			dev_warn(ctrl->device,
4018 				"nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
4019 				nsid);
4020 			break;
4021 		}
4022 		if (!nvme_multi_css(ctrl)) {
4023 			dev_warn(ctrl->device,
4024 				"command set not reported for nsid: %d\n",
4025 				nsid);
4026 			break;
4027 		}
4028 		nvme_alloc_ns(ctrl, nsid, &ids);
4029 		break;
4030 	default:
4031 		dev_warn(ctrl->device, "unknown csi %u for nsid %u\n",
4032 			ids.csi, nsid);
4033 		break;
4034 	}
4035 }
4036 
4037 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
4038 					unsigned nsid)
4039 {
4040 	struct nvme_ns *ns, *next;
4041 	LIST_HEAD(rm_list);
4042 
4043 	down_write(&ctrl->namespaces_rwsem);
4044 	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
4045 		if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
4046 			list_move_tail(&ns->list, &rm_list);
4047 	}
4048 	up_write(&ctrl->namespaces_rwsem);
4049 
4050 	list_for_each_entry_safe(ns, next, &rm_list, list)
4051 		nvme_ns_remove(ns);
4053 }
4054 
4055 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
4056 {
4057 	const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32);
4058 	__le32 *ns_list;
4059 	u32 prev = 0;
4060 	int ret = 0, i;
4061 
4062 	if (nvme_ctrl_limited_cns(ctrl))
4063 		return -EOPNOTSUPP;
4064 
4065 	ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
4066 	if (!ns_list)
4067 		return -ENOMEM;
4068 
4069 	for (;;) {
4070 		struct nvme_command cmd = {
4071 			.identify.opcode	= nvme_admin_identify,
4072 			.identify.cns		= NVME_ID_CNS_NS_ACTIVE_LIST,
4073 			.identify.nsid		= cpu_to_le32(prev),
4074 		};
4075 
4076 		ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list,
4077 					    NVME_IDENTIFY_DATA_SIZE);
4078 		if (ret) {
4079 			dev_warn(ctrl->device,
4080 				"Identify NS List failed (status=0x%x)\n", ret);
4081 			goto free;
4082 		}
4083 
4084 		for (i = 0; i < nr_entries; i++) {
4085 			u32 nsid = le32_to_cpu(ns_list[i]);
4086 
4087 			if (!nsid)	/* end of the list? */
4088 				goto out;
4089 			nvme_validate_or_alloc_ns(ctrl, nsid);
4090 			while (++prev < nsid)
4091 				nvme_ns_remove_by_nsid(ctrl, prev);
4092 		}
4093 	}
4094  out:
4095 	nvme_remove_invalid_namespaces(ctrl, prev);
4096  free:
4097 	kfree(ns_list);
4098 	return ret;
4099 }
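/*
 * Worked example (editorial note): with active NSIDs {1, 3} and NSID 2
 * just detached, the first Identify page (prev = 0) comes back as
 * [1, 3, 0, ...].  NSID 1 is validated and prev advances to 1; NSID 3
 * is validated, then the ++prev loop visits 2 and removes the stale
 * namespace before prev settles at 3.  The zero entry ends the walk at
 * the out label, where nvme_remove_invalid_namespaces() drops anything
 * above prev.
 */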
4100 
4101 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
4102 {
4103 	struct nvme_id_ctrl *id;
4104 	u32 nn, i;
4105 
4106 	if (nvme_identify_ctrl(ctrl, &id))
4107 		return;
4108 	nn = le32_to_cpu(id->nn);
4109 	kfree(id);
4110 
4111 	for (i = 1; i <= nn; i++)
4112 		nvme_validate_or_alloc_ns(ctrl, i);
4113 
4114 	nvme_remove_invalid_namespaces(ctrl, nn);
4115 }
4116 
4117 static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
4118 {
4119 	size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
4120 	__le32 *log;
4121 	int error;
4122 
4123 	log = kzalloc(log_size, GFP_KERNEL);
4124 	if (!log)
4125 		return;
4126 
4127 	/*
4128 	 * We need to read the log to clear the AEN, but we don't want to rely
4129 	 * on it for the changed namespace information as userspace could have
4130 	 * raced with us in reading the log page, which could cause us to miss
4131 	 * updates.
4132 	 */
4133 	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0,
4134 			NVME_CSI_NVM, log, log_size, 0);
4135 	if (error)
4136 		dev_warn(ctrl->device,
4137 			"reading changed ns log failed: %d\n", error);
4138 
4139 	kfree(log);
4140 }
4141 
4142 static void nvme_scan_work(struct work_struct *work)
4143 {
4144 	struct nvme_ctrl *ctrl =
4145 		container_of(work, struct nvme_ctrl, scan_work);
4146 
4147 	/* No tagset on a live ctrl means IO queues could not be created */
4148 	if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
4149 		return;
4150 
4151 	if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
4152 		dev_info(ctrl->device, "rescanning namespaces.\n");
4153 		nvme_clear_changed_ns_log(ctrl);
4154 	}
4155 
4156 	mutex_lock(&ctrl->scan_lock);
4157 	if (nvme_scan_ns_list(ctrl) != 0)
4158 		nvme_scan_ns_sequential(ctrl);
4159 	mutex_unlock(&ctrl->scan_lock);
4160 }
4161 
4162 /*
4163  * This function iterates the namespace list unlocked to allow recovery from
4164  * controller failure. It is up to the caller to ensure the namespace list is
4165  * not modified by scan work while this function is executing.
4166  */
4167 void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
4168 {
4169 	struct nvme_ns *ns, *next;
4170 	LIST_HEAD(ns_list);
4171 
4172 	/*
4173 	 * Make sure to requeue I/O to all namespaces: outstanding requests
4174 	 * may stem from the scan itself and must complete for scan_work
4175 	 * to make progress.
4176 	 */
4177 	nvme_mpath_clear_ctrl_paths(ctrl);
4178 
4179 	/* prevent racing with ns scanning */
4180 	flush_work(&ctrl->scan_work);
4181 
4182 	/*
4183 	 * The dead state indicates the controller was not gracefully
4184 	 * disconnected. In that case, we won't be able to flush any data while
4185 	 * removing the namespaces' disks; fail all the queues now to avoid
4186 	 * potentially having to clean up the failed sync later.
4187 	 */
4188 	if (ctrl->state == NVME_CTRL_DEAD)
4189 		nvme_kill_queues(ctrl);
4190 
4191 	/* this is a no-op when called from the controller reset handler */
4192 	nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);
4193 
4194 	down_write(&ctrl->namespaces_rwsem);
4195 	list_splice_init(&ctrl->namespaces, &ns_list);
4196 	up_write(&ctrl->namespaces_rwsem);
4197 
4198 	list_for_each_entry_safe(ns, next, &ns_list, list)
4199 		nvme_ns_remove(ns);
4200 }
4201 EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
4202 
4203 static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
4204 {
4205 	struct nvme_ctrl *ctrl =
4206 		container_of(dev, struct nvme_ctrl, ctrl_device);
4207 	struct nvmf_ctrl_options *opts = ctrl->opts;
4208 	int ret;
4209 
4210 	ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
4211 	if (ret)
4212 		return ret;
4213 
4214 	if (opts) {
4215 		ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
4216 		if (ret)
4217 			return ret;
4218 
4219 		ret = add_uevent_var(env, "NVME_TRSVCID=%s",
4220 				opts->trsvcid ?: "none");
4221 		if (ret)
4222 			return ret;
4223 
4224 		ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
4225 				opts->host_traddr ?: "none");
4226 		if (ret)
4227 			return ret;
4228 
4229 		ret = add_uevent_var(env, "NVME_HOST_IFACE=%s",
4230 				opts->host_iface ?: "none");
4231 	}
4232 	return ret;
4233 }
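/*
 * Editorial note: for a fabrics controller the resulting uevent might
 * carry, for example (hypothetical values):
 *
 *	NVME_TRTYPE=tcp
 *	NVME_TRADDR=192.168.1.10
 *	NVME_TRSVCID=4420
 *	NVME_HOST_TRADDR=none
 *	NVME_HOST_IFACE=none
 *
 * PCIe controllers have no ctrl->opts, so only NVME_TRTYPE is emitted.
 */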
4234 
4235 static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
4236 {
4237 	char *envp[2] = { NULL, NULL };
4238 	u32 aen_result = ctrl->aen_result;
4239 
4240 	ctrl->aen_result = 0;
4241 	if (!aen_result)
4242 		return;
4243 
4244 	envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
4245 	if (!envp[0])
4246 		return;
4247 	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
4248 	kfree(envp[0]);
4249 }
4250 
4251 static void nvme_async_event_work(struct work_struct *work)
4252 {
4253 	struct nvme_ctrl *ctrl =
4254 		container_of(work, struct nvme_ctrl, async_event_work);
4255 
4256 	nvme_aen_uevent(ctrl);
4257 
4258 	/*
4259 	 * The transport drivers must guarantee AER submission here is safe by
4260 	 * flushing ctrl async_event_work after changing the controller state
4261 	 * from LIVE and before freeing the admin queue.
4262 	 */
4263 	if (ctrl->state == NVME_CTRL_LIVE)
4264 		ctrl->ops->submit_async_event(ctrl);
4265 }
4266 
4267 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
4268 {
4270 	u32 csts;
4271 
4272 	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
4273 		return false;
4274 
4275 	if (csts == ~0)
4276 		return false;
4277 
4278 	return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
4279 }
4280 
4281 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
4282 {
4283 	struct nvme_fw_slot_info_log *log;
4284 
4285 	log = kmalloc(sizeof(*log), GFP_KERNEL);
4286 	if (!log)
4287 		return;
4288 
4289 	if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM,
4290 			log, sizeof(*log), 0))
4291 		dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
4292 	kfree(log);
4293 }
4294 
4295 static void nvme_fw_act_work(struct work_struct *work)
4296 {
4297 	struct nvme_ctrl *ctrl = container_of(work,
4298 				struct nvme_ctrl, fw_act_work);
4299 	unsigned long fw_act_timeout;
4300 
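	/*
	 * MTFA is reported by the controller in units of 100 ms (e.g.
	 * mtfa == 50 allows 5 seconds); without it, fall back to the
	 * admin command timeout.
	 */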
4301 	if (ctrl->mtfa)
4302 		fw_act_timeout = jiffies +
4303 				msecs_to_jiffies(ctrl->mtfa * 100);
4304 	else
4305 		fw_act_timeout = jiffies +
4306 				msecs_to_jiffies(admin_timeout * 1000);
4307 
4308 	nvme_stop_queues(ctrl);
4309 	while (nvme_ctrl_pp_status(ctrl)) {
4310 		if (time_after(jiffies, fw_act_timeout)) {
4311 			dev_warn(ctrl->device,
4312 				"Fw activation timeout, reset controller\n");
4313 			nvme_try_sched_reset(ctrl);
4314 			return;
4315 		}
4316 		msleep(100);
4317 	}
4318 
4319 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
4320 		return;
4321 
4322 	nvme_start_queues(ctrl);
4323 	/* read FW slot information to clear the AER */
4324 	nvme_get_fw_slot_info(ctrl);
4325 }
4326 
4327 static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
4328 {
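	/*
	 * Per the NVMe spec, the AEN result dword carries the event type
	 * in bits 2:0 (decoded in nvme_complete_async_event()) and the
	 * event information, i.e. the notice type switched on below, in
	 * bits 15:8.
	 */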
4329 	u32 aer_notice_type = (result & 0xff00) >> 8;
4330 
4331 	trace_nvme_async_event(ctrl, aer_notice_type);
4332 
4333 	switch (aer_notice_type) {
4334 	case NVME_AER_NOTICE_NS_CHANGED:
4335 		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
4336 		nvme_queue_scan(ctrl);
4337 		break;
4338 	case NVME_AER_NOTICE_FW_ACT_STARTING:
4339 		/*
4340 		 * We are (ab)using the RESETTING state to prevent subsequent
4341 		 * recovery actions from interfering with the controller's
4342 		 * firmware activation.
4343 		 */
4344 		if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
4345 			queue_work(nvme_wq, &ctrl->fw_act_work);
4346 		break;
4347 #ifdef CONFIG_NVME_MULTIPATH
4348 	case NVME_AER_NOTICE_ANA:
4349 		if (!ctrl->ana_log_buf)
4350 			break;
4351 		queue_work(nvme_wq, &ctrl->ana_work);
4352 		break;
4353 #endif
4354 	case NVME_AER_NOTICE_DISC_CHANGED:
4355 		ctrl->aen_result = result;
4356 		break;
4357 	default:
4358 		dev_warn(ctrl->device, "async event result %08x\n", result);
4359 	}
4360 }
4361 
4362 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
4363 		volatile union nvme_result *res)
4364 {
4365 	u32 result = le32_to_cpu(res->u32);
4366 	u32 aer_type = result & 0x07;
4367 
4368 	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
4369 		return;
4370 
4371 	switch (aer_type) {
4372 	case NVME_AER_NOTICE:
4373 		nvme_handle_aen_notice(ctrl, result);
4374 		break;
4375 	case NVME_AER_ERROR:
4376 	case NVME_AER_SMART:
4377 	case NVME_AER_CSS:
4378 	case NVME_AER_VS:
4379 		trace_nvme_async_event(ctrl, aer_type);
4380 		ctrl->aen_result = result;
4381 		break;
4382 	default:
4383 		break;
4384 	}
4385 	queue_work(nvme_wq, &ctrl->async_event_work);
4386 }
4387 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
4388 
4389 void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
4390 {
4391 	nvme_mpath_stop(ctrl);
4392 	nvme_stop_keep_alive(ctrl);
4393 	nvme_stop_failfast_work(ctrl);
4394 	flush_work(&ctrl->async_event_work);
4395 	cancel_work_sync(&ctrl->fw_act_work);
4396 }
4397 EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
4398 
4399 void nvme_start_ctrl(struct nvme_ctrl *ctrl)
4400 {
4401 	nvme_start_keep_alive(ctrl);
4402 
4403 	nvme_enable_aen(ctrl);
4404 
4405 	if (ctrl->queue_count > 1) {
4406 		nvme_queue_scan(ctrl);
4407 		nvme_start_queues(ctrl);
4408 	}
4409 }
4410 EXPORT_SYMBOL_GPL(nvme_start_ctrl);
4411 
4412 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
4413 {
4414 	nvme_hwmon_exit(ctrl);
4415 	nvme_fault_inject_fini(&ctrl->fault_inject);
4416 	dev_pm_qos_hide_latency_tolerance(ctrl->device);
4417 	cdev_device_del(&ctrl->cdev, ctrl->device);
4418 	nvme_put_ctrl(ctrl);
4419 }
4420 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
4421 
4422 static void nvme_free_cels(struct nvme_ctrl *ctrl)
4423 {
4424 	struct nvme_effects_log	*cel;
4425 	unsigned long i;
4426 
4427 	xa_for_each(&ctrl->cels, i, cel) {
4428 		xa_erase(&ctrl->cels, i);
4429 		kfree(cel);
4430 	}
4431 
4432 	xa_destroy(&ctrl->cels);
4433 }
4434 
4435 static void nvme_free_ctrl(struct device *dev)
4436 {
4437 	struct nvme_ctrl *ctrl =
4438 		container_of(dev, struct nvme_ctrl, ctrl_device);
4439 	struct nvme_subsystem *subsys = ctrl->subsys;
4440 
4441 	if (!subsys || ctrl->instance != subsys->instance)
4442 		ida_simple_remove(&nvme_instance_ida, ctrl->instance);
4443 
4444 	nvme_free_cels(ctrl);
4445 	nvme_mpath_uninit(ctrl);
4446 	__free_page(ctrl->discard_page);
4447 
4448 	if (subsys) {
4449 		mutex_lock(&nvme_subsystems_lock);
4450 		list_del(&ctrl->subsys_entry);
4451 		sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
4452 		mutex_unlock(&nvme_subsystems_lock);
4453 	}
4454 
4455 	ctrl->ops->free_ctrl(ctrl);
4456 
4457 	if (subsys)
4458 		nvme_put_subsystem(subsys);
4459 }
4460 
4461 /*
4462  * Initialize an NVMe controller structure.  This needs to be called during
4463  * the earliest initialization so that we have the initialized structures
4464  * around during probing.
4465  */
4466 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
4467 		const struct nvme_ctrl_ops *ops, unsigned long quirks)
4468 {
4469 	int ret;
4470 
4471 	ctrl->state = NVME_CTRL_NEW;
4472 	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
4473 	spin_lock_init(&ctrl->lock);
4474 	mutex_init(&ctrl->scan_lock);
4475 	INIT_LIST_HEAD(&ctrl->namespaces);
4476 	xa_init(&ctrl->cels);
4477 	init_rwsem(&ctrl->namespaces_rwsem);
4478 	ctrl->dev = dev;
4479 	ctrl->ops = ops;
4480 	ctrl->quirks = quirks;
4481 	ctrl->numa_node = NUMA_NO_NODE;
4482 	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
4483 	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
4484 	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
4485 	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
4486 	init_waitqueue_head(&ctrl->state_wq);
4487 
4488 	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
4489 	INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
4490 	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
4491 	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
4492 
4493 	BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
4494 			PAGE_SIZE);
4495 	ctrl->discard_page = alloc_page(GFP_KERNEL);
4496 	if (!ctrl->discard_page) {
4497 		ret = -ENOMEM;
4498 		goto out;
4499 	}
4500 
4501 	ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
4502 	if (ret < 0)
4503 		goto out;
4504 	ctrl->instance = ret;
4505 
4506 	device_initialize(&ctrl->ctrl_device);
4507 	ctrl->device = &ctrl->ctrl_device;
4508 	ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
4509 			ctrl->instance);
4510 	ctrl->device->class = nvme_class;
4511 	ctrl->device->parent = ctrl->dev;
4512 	ctrl->device->groups = nvme_dev_attr_groups;
4513 	ctrl->device->release = nvme_free_ctrl;
4514 	dev_set_drvdata(ctrl->device, ctrl);
4515 	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
4516 	if (ret)
4517 		goto out_release_instance;
4518 
4519 	nvme_get_ctrl(ctrl);
4520 	cdev_init(&ctrl->cdev, &nvme_dev_fops);
4521 	ctrl->cdev.owner = ops->module;
4522 	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
4523 	if (ret)
4524 		goto out_free_name;
4525 
4526 	/*
4527 	 * Initialize latency tolerance controls.  The sysfs files won't
4528 	 * be visible to userspace unless the device actually supports APST.
4529 	 */
4530 	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
4531 	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
4532 		min(default_ps_max_latency_us, (unsigned long)S32_MAX));
4533 
4534 	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
4535 	nvme_mpath_init_ctrl(ctrl);
4536 
4537 	return 0;
4538 out_free_name:
4539 	nvme_put_ctrl(ctrl);
4540 	kfree_const(ctrl->device->kobj.name);
4541 out_release_instance:
4542 	ida_simple_remove(&nvme_instance_ida, ctrl->instance);
4543 out:
4544 	if (ctrl->discard_page)
4545 		__free_page(ctrl->discard_page);
4546 	return ret;
4547 }
4548 EXPORT_SYMBOL_GPL(nvme_init_ctrl);
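/*
 * Illustrative only, not part of the driver: a minimal sketch of how a
 * transport hooks itself up via nvme_init_ctrl(), assuming the
 * nvme_ctrl_ops layout at this revision; all foo_* names are
 * hypothetical.
 *
 *	static const struct nvme_ctrl_ops foo_ctrl_ops = {
 *		.name			= "foo",
 *		.module			= THIS_MODULE,
 *		.reg_read32		= foo_reg_read32,
 *		.reg_write32		= foo_reg_write32,
 *		.reg_read64		= foo_reg_read64,
 *		.free_ctrl		= foo_free_ctrl,
 *		.submit_async_event	= foo_submit_async_event,
 *	};
 *
 *	ret = nvme_init_ctrl(&foo->ctrl, dev, &foo_ctrl_ops, 0);
 *	if (ret)
 *		return ret;
 */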
4549 
4550 static void nvme_start_ns_queue(struct nvme_ns *ns)
4551 {
4552 	if (test_and_clear_bit(NVME_NS_STOPPED, &ns->flags))
4553 		blk_mq_unquiesce_queue(ns->queue);
4554 }
4555 
4556 static void nvme_stop_ns_queue(struct nvme_ns *ns)
4557 {
4558 	if (!test_and_set_bit(NVME_NS_STOPPED, &ns->flags))
4559 		blk_mq_quiesce_queue(ns->queue);
4560 	else
4561 		blk_mq_wait_quiesce_done(ns->queue);
4562 }
4563 
4564 /*
4565  * Prepare a queue for teardown.
4566  *
4567  * This must forcibly unquiesce queues to avoid blocking dispatch, and only set
4568  * the capacity to 0 after that to avoid blocking dispatchers that may be
4569  * holding bd_mutex.  This will end buffered writers dirtying pages that can't
4570  * be synced.
4571  */
4572 static void nvme_set_queue_dying(struct nvme_ns *ns)
4573 {
4574 	if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
4575 		return;
4576 
4577 	blk_set_queue_dying(ns->queue);
4578 	nvme_start_ns_queue(ns);
4579 
4580 	set_capacity_and_notify(ns->disk, 0);
4581 }
4582 
4583 /**
4584  * nvme_kill_queues() - Ends all namespace queues
4585  * @ctrl: the dead controller that needs to end
4586  *
4587  * Call this function when the driver determines it is unable to get the
4588  * controller in a state capable of servicing IO.
4589  */
4590 void nvme_kill_queues(struct nvme_ctrl *ctrl)
4591 {
4592 	struct nvme_ns *ns;
4593 
4594 	down_read(&ctrl->namespaces_rwsem);
4595 
4596 	/* Forcibly unquiesce queues to avoid blocking dispatch */
4597 	if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
4598 		nvme_start_admin_queue(ctrl);
4599 
4600 	list_for_each_entry(ns, &ctrl->namespaces, list)
4601 		nvme_set_queue_dying(ns);
4602 
4603 	up_read(&ctrl->namespaces_rwsem);
4604 }
4605 EXPORT_SYMBOL_GPL(nvme_kill_queues);
4606 
4607 void nvme_unfreeze(struct nvme_ctrl *ctrl)
4608 {
4609 	struct nvme_ns *ns;
4610 
4611 	down_read(&ctrl->namespaces_rwsem);
4612 	list_for_each_entry(ns, &ctrl->namespaces, list)
4613 		blk_mq_unfreeze_queue(ns->queue);
4614 	up_read(&ctrl->namespaces_rwsem);
4615 }
4616 EXPORT_SYMBOL_GPL(nvme_unfreeze);
4617 
4618 int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
4619 {
4620 	struct nvme_ns *ns;
4621 
4622 	down_read(&ctrl->namespaces_rwsem);
4623 	list_for_each_entry(ns, &ctrl->namespaces, list) {
4624 		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
4625 		if (timeout <= 0)
4626 			break;
4627 	}
4628 	up_read(&ctrl->namespaces_rwsem);
4629 	return timeout;
4630 }
4631 EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
4632 
4633 void nvme_wait_freeze(struct nvme_ctrl *ctrl)
4634 {
4635 	struct nvme_ns *ns;
4636 
4637 	down_read(&ctrl->namespaces_rwsem);
4638 	list_for_each_entry(ns, &ctrl->namespaces, list)
4639 		blk_mq_freeze_queue_wait(ns->queue);
4640 	up_read(&ctrl->namespaces_rwsem);
4641 }
4642 EXPORT_SYMBOL_GPL(nvme_wait_freeze);
4643 
4644 void nvme_start_freeze(struct nvme_ctrl *ctrl)
4645 {
4646 	struct nvme_ns *ns;
4647 
4648 	down_read(&ctrl->namespaces_rwsem);
4649 	list_for_each_entry(ns, &ctrl->namespaces, list)
4650 		blk_freeze_queue_start(ns->queue);
4651 	up_read(&ctrl->namespaces_rwsem);
4652 }
4653 EXPORT_SYMBOL_GPL(nvme_start_freeze);
4654 
4655 void nvme_stop_queues(struct nvme_ctrl *ctrl)
4656 {
4657 	struct nvme_ns *ns;
4658 
4659 	down_read(&ctrl->namespaces_rwsem);
4660 	list_for_each_entry(ns, &ctrl->namespaces, list)
4661 		nvme_stop_ns_queue(ns);
4662 	up_read(&ctrl->namespaces_rwsem);
4663 }
4664 EXPORT_SYMBOL_GPL(nvme_stop_queues);
4665 
4666 void nvme_start_queues(struct nvme_ctrl *ctrl)
4667 {
4668 	struct nvme_ns *ns;
4669 
4670 	down_read(&ctrl->namespaces_rwsem);
4671 	list_for_each_entry(ns, &ctrl->namespaces, list)
4672 		nvme_start_ns_queue(ns);
4673 	up_read(&ctrl->namespaces_rwsem);
4674 }
4675 EXPORT_SYMBOL_GPL(nvme_start_queues);
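/*
 * Illustrative only, not part of the driver: transports typically pair
 * the freeze and quiesce helpers above around a reset, roughly:
 *
 *	nvme_start_freeze(ctrl);
 *	nvme_stop_queues(ctrl);
 *	// tear down and re-establish the I/O queues
 *	nvme_start_queues(ctrl);
 *	nvme_wait_freeze(ctrl);
 *	nvme_unfreeze(ctrl);
 *
 * nvme_wait_freeze_timeout() is the bounded variant for resets that
 * must not block forever.
 */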
4676 
4677 void nvme_stop_admin_queue(struct nvme_ctrl *ctrl)
4678 {
4679 	if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
4680 		blk_mq_quiesce_queue(ctrl->admin_q);
4681 	else
4682 		blk_mq_wait_quiesce_done(ctrl->admin_q);
4683 }
4684 EXPORT_SYMBOL_GPL(nvme_stop_admin_queue);
4685 
4686 void nvme_start_admin_queue(struct nvme_ctrl *ctrl)
4687 {
4688 	if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
4689 		blk_mq_unquiesce_queue(ctrl->admin_q);
4690 }
4691 EXPORT_SYMBOL_GPL(nvme_start_admin_queue);
4692 
4693 void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
4694 {
4695 	struct nvme_ns *ns;
4696 
4697 	down_read(&ctrl->namespaces_rwsem);
4698 	list_for_each_entry(ns, &ctrl->namespaces, list)
4699 		blk_sync_queue(ns->queue);
4700 	up_read(&ctrl->namespaces_rwsem);
4701 }
4702 EXPORT_SYMBOL_GPL(nvme_sync_io_queues);
4703 
4704 void nvme_sync_queues(struct nvme_ctrl *ctrl)
4705 {
4706 	nvme_sync_io_queues(ctrl);
4707 	if (ctrl->admin_q)
4708 		blk_sync_queue(ctrl->admin_q);
4709 }
4710 EXPORT_SYMBOL_GPL(nvme_sync_queues);
4711 
4712 struct nvme_ctrl *nvme_ctrl_from_file(struct file *file)
4713 {
4714 	if (file->f_op != &nvme_dev_fops)
4715 		return NULL;
4716 	return file->private_data;
4717 }
4718 EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU);
4719 
4720 /*
4721  * Check we didn't inadvertently grow the command structure sizes:
4722  */
4723 static inline void _nvme_check_size(void)
4724 {
4725 	BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
4726 	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
4727 	BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
4728 	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
4729 	BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
4730 	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
4731 	BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
4732 	BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
4733 	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
4734 	BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
4735 	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
4736 	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
4737 	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
4738 	BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE);
4739 	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE);
4740 	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE);
4741 	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
4742 	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
4743 	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
4744 	BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
4745 }
4746 
4748 static int __init nvme_core_init(void)
4749 {
4750 	int result = -ENOMEM;
4751 
4752 	_nvme_check_size();
4753 
4754 	nvme_wq = alloc_workqueue("nvme-wq",
4755 			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4756 	if (!nvme_wq)
4757 		goto out;
4758 
4759 	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
4760 			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4761 	if (!nvme_reset_wq)
4762 		goto destroy_wq;
4763 
4764 	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
4765 			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4766 	if (!nvme_delete_wq)
4767 		goto destroy_reset_wq;
4768 
4769 	result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0,
4770 			NVME_MINORS, "nvme");
4771 	if (result < 0)
4772 		goto destroy_delete_wq;
4773 
4774 	nvme_class = class_create(THIS_MODULE, "nvme");
4775 	if (IS_ERR(nvme_class)) {
4776 		result = PTR_ERR(nvme_class);
4777 		goto unregister_chrdev;
4778 	}
4779 	nvme_class->dev_uevent = nvme_class_uevent;
4780 
4781 	nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
4782 	if (IS_ERR(nvme_subsys_class)) {
4783 		result = PTR_ERR(nvme_subsys_class);
4784 		goto destroy_class;
4785 	}
4786 
4787 	result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS,
4788 				     "nvme-generic");
4789 	if (result < 0)
4790 		goto destroy_subsys_class;
4791 
4792 	nvme_ns_chr_class = class_create(THIS_MODULE, "nvme-generic");
4793 	if (IS_ERR(nvme_ns_chr_class)) {
4794 		result = PTR_ERR(nvme_ns_chr_class);
4795 		goto unregister_generic_ns;
4796 	}
4797 
4798 	return 0;
4799 
4800 unregister_generic_ns:
4801 	unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
4802 destroy_subsys_class:
4803 	class_destroy(nvme_subsys_class);
4804 destroy_class:
4805 	class_destroy(nvme_class);
4806 unregister_chrdev:
4807 	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
4808 destroy_delete_wq:
4809 	destroy_workqueue(nvme_delete_wq);
4810 destroy_reset_wq:
4811 	destroy_workqueue(nvme_reset_wq);
4812 destroy_wq:
4813 	destroy_workqueue(nvme_wq);
4814 out:
4815 	return result;
4816 }
4817 
4818 static void __exit nvme_core_exit(void)
4819 {
4820 	class_destroy(nvme_ns_chr_class);
4821 	class_destroy(nvme_subsys_class);
4822 	class_destroy(nvme_class);
4823 	unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
4824 	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
4825 	destroy_workqueue(nvme_delete_wq);
4826 	destroy_workqueue(nvme_reset_wq);
4827 	destroy_workqueue(nvme_wq);
4828 	ida_destroy(&nvme_ns_chr_minor_ida);
4829 	ida_destroy(&nvme_instance_ida);
4830 }
4831 
4832 MODULE_LICENSE("GPL");
4833 MODULE_VERSION("1.0");
4834 module_init(nvme_core_init);
4835 module_exit(nvme_core_exit);
4836